/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <net/switchdev.h>

#include "port.h"
#include "core.h"

#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 6656	/* Bridged VLAN interfaces */

#define MLXSW_SP_RFID_BASE 15360
#define MLXSW_SP_RIF_MAX 800

#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4

#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)

#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256

#define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */

#define MLXSW_SP_BYTES_PER_CELL 96

#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_CELLS_TO_BYTES(c) ((c) * MLXSW_SP_BYTES_PER_CELL)
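/* Example: with 96-byte cells, MLXSW_SP_BYTES_TO_CELLS(1518) =
 * DIV_ROUND_UP(1518, 96) = 16 cells, and MLXSW_SP_CELLS_TO_BYTES(16) = 1536
 * bytes of buffer.
 */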

#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */

/* Maximum delay buffer needed in case of PAUSE frames, in cells.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 612

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

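/* Headroom, in cells, needed to absorb traffic in flight once PFC pause has
 * been asserted: the delay allowance (expressed in bits, hence the division
 * by BITS_PER_BYTE) scaled by MLXSW_SP_CELL_FACTOR, plus one MTU worth of
 * cells, presumably to cover a maximum-sized frame already in transmission.
 */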
static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
{
	delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
}

struct mlxsw_sp_port;

struct mlxsw_sp_upper {
	struct net_device *dev;
	unsigned int ref_count;
};

struct mlxsw_sp_fid {
	void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
	struct list_head list;
	unsigned int ref_count;
	struct net_device *dev;
	struct mlxsw_sp_rif *r;
	u16 fid;
};

struct mlxsw_sp_rif {
	struct net_device *dev;
	unsigned int ref_count;
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif;
};

struct mlxsw_sp_mid {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 vid;
	u16 mid;
	unsigned int ref_count;
};

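/* The FID number space is partitioned: FIDs below MLXSW_SP_VFID_BASE
 * (VLAN_N_VID) back VLANs of the VLAN-aware bridge, FIDs in
 * [MLXSW_SP_VFID_BASE, MLXSW_SP_RFID_BASE) are vFIDs used for bridged VLAN
 * interfaces, and FIDs from MLXSW_SP_RFID_BASE up are rFIDs associated with
 * router interfaces. The helpers below translate between these ranges.
 */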
static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
	return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
	return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
	return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}

static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
{
	return fid >= MLXSW_SP_RFID_BASE;
}

static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
	return MLXSW_SP_RFID_BASE + rif;
}

struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
	struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

#define MLXSW_SP_SB_POOL_COUNT 4
#define MLXSW_SP_SB_TC_COUNT 8

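/* Per-ASIC shared buffer state. Each table below is instantiated twice,
 * once per buffer direction (ingress and egress, cf. mlxsw_reg_sbxx_dir),
 * which is what the leading [2] dimension indexes.
 */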
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
	struct {
		struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
		struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
	} ports[MLXSW_PORT_MAX_PORTS];
};

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib;

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	bool used;
	enum mlxsw_sp_l3proto proto;
	u32 tb_id; /* kernel fib table id */
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
};

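/* Per-ASIC router state: a pool of LPM trees shared by ref_count among the
 * virtual routers, one mlxsw_sp_vr per kernel FIB table in use, a hash table
 * of offloaded neighbour entries and a delayed work item used to update them
 * every neighs_update.interval milliseconds.
 */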
struct mlxsw_sp_router {
	struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
	struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
	struct rhashtable neigh_ht;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct list_head nexthop_group_list;
};

struct mlxsw_sp {
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
	} vfids;
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
	} br_mids;
	struct list_head fids;	/* VLAN-aware bridge FIDs */
	struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
	struct mlxsw_sp_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	struct mlxsw_sp_upper master_bridge;
	struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
	u8 port_to_module[MLXSW_PORT_MAX_PORTS];
	struct mlxsw_sp_sb sb;
	struct mlxsw_sp_router router;
	struct {
		DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
	} kvdl;
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	return &mlxsw_sp->lags[lag_id];
}

struct mlxsw_sp_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};

struct mlxsw_sp_port {
	struct mlxsw_core_port core_port; /* must be first */
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 stp_state;
	u8 learning:1,
	   learning_sync:1,
	   uc_flood:1,
	   bridged:1,
	   lagged:1,
	   split:1;
	u16 pvid;
	u16 lag_id;
	struct {
		struct list_head list;
		struct mlxsw_sp_fid *f;
		u16 vid;
	} vport;
	struct {
		u8 tx_pause:1,
		   rx_pause:1;
	} link;
	struct {
		struct ieee_ets *ets;
		struct ieee_maxrate *maxrate;
		struct ieee_pfc *pfc;
	} dcb;
	struct {
		u8 module;
		u8 width;
		u8 lane;
	} mapping;
	/* 802.1Q bridge VLANs */
	unsigned long *active_vlans;
	unsigned long *untagged_vlans;
	/* VLAN interfaces */
	struct list_head vports_list;
};

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;

	local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
						lag_id, port_index);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}

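/* A vPort represents a VLAN upper device of a port: an additional
 * struct mlxsw_sp_port linked into the parent port's vports_list with a
 * non-zero vport.vid. The helpers below operate on such vPorts.
 */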
static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vid;
}

static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	return vid != 0;
}

static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_fid *f)
{
	mlxsw_sp_vport->vport.f = f;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.f;
}

static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	return f ? f->dev : NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				u16 fid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

		if (f && f->fid == fid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
						     u16 fid)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->fids, list)
		if (f->fid == fid)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		   const struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
		if (f->dev == br_dev)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
			return mlxsw_sp->rifs[i];

	return NULL;
}

enum mlxsw_sp_flood_table {
	MLXSW_SP_FLOOD_TABLE_UC,
	MLXSW_SP_FLOOD_TABLE_BM,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate);

#ifdef CONFIG_MLXSW_SPECTRUM_DCB

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);

#else

static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return 0;
}

static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}

#endif

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_ipv4_fib *fib4,
			     struct switchdev_trans *trans);
int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_ipv4_fib *fib4);
int mlxsw_sp_router_neigh_construct(struct net_device *dev,
				    struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
				   struct neighbour *n);

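/* Allocator for the KVD linear area (backed by the kvdl.usage bitmap in
 * struct mlxsw_sp): mlxsw_sp_kvdl_alloc() reserves room for entry_count
 * consecutive entries and returns the base index (or a negative errno),
 * while mlxsw_sp_kvdl_free() returns the entries allocated at entry_index
 * to the pool.
 */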
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);

#endif