Jiri Pirko56ade8f2015-10-16 14:01:37 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
Jiri Pirko22a67762017-02-03 10:29:07 +01003 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
Jiri Pirko56ade8f2015-10-16 14:01:37 +02005 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/types.h>
Jiri Pirko1d20d232016-10-27 15:12:59 +020040#include <linux/pci.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020041#include <linux/netdevice.h>
42#include <linux/etherdevice.h>
43#include <linux/ethtool.h>
44#include <linux/slab.h>
45#include <linux/device.h>
46#include <linux/skbuff.h>
47#include <linux/if_vlan.h>
48#include <linux/if_bridge.h>
49#include <linux/workqueue.h>
50#include <linux/jiffies.h>
51#include <linux/bitops.h>
Ido Schimmel7f71eb42015-12-15 16:03:37 +010052#include <linux/list.h>
Ido Schimmel80bedf12016-06-20 23:03:59 +020053#include <linux/notifier.h>
Ido Schimmel90183b92016-04-06 17:10:08 +020054#include <linux/dcbnl.h>
Ido Schimmel99724c12016-07-04 08:23:14 +020055#include <linux/inetdevice.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020056#include <net/switchdev.h>
Yotam Gigi763b4b72016-07-21 12:03:17 +020057#include <net/pkt_cls.h>
58#include <net/tc_act/tc_mirred.h>
Jiri Pirkoe7322632016-09-01 10:37:43 +020059#include <net/netevent.h>
Yotam Gigi98d0f7b2017-01-23 11:07:11 +010060#include <net/tc_act/tc_sample.h>
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +020061#include <net/addrconf.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020062
63#include "spectrum.h"
Jiri Pirko1d20d232016-10-27 15:12:59 +020064#include "pci.h"
Jiri Pirko56ade8f2015-10-16 14:01:37 +020065#include "core.h"
66#include "reg.h"
67#include "port.h"
68#include "trap.h"
69#include "txheader.h"
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +010070#include "spectrum_cnt.h"
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +020071#include "spectrum_dpipe.h"
Yotam Gigie5e5c882017-05-23 21:56:27 +020072#include "../mlxfw/mlxfw.h"
Jiri Pirko56ade8f2015-10-16 14:01:37 +020073
Yotam Gigi6b742192017-05-23 21:56:29 +020074#define MLXSW_FWREV_MAJOR 13
75#define MLXSW_FWREV_MINOR 1420
76#define MLXSW_FWREV_SUBMINOR 122
77
78static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
79 .major = MLXSW_FWREV_MAJOR,
80 .minor = MLXSW_FWREV_MINOR,
81 .subminor = MLXSW_FWREV_SUBMINOR
82};
83
84#define MLXSW_SP_FW_FILENAME \
Yotam Gigia4e1ce22017-06-04 16:49:58 +020085 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
Yotam Gigi6b742192017-05-23 21:56:29 +020086 "." __stringify(MLXSW_FWREV_MINOR) \
87 "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
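
/* With the revision values above this expands to
 * "mellanox/mlxsw_spectrum-13.1420.122.mfa2", which is the file requested
 * from the firmware loader in mlxsw_sp_fw_rev_validate() below.
 */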
88
Jiri Pirko56ade8f2015-10-16 14:01:37 +020089static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
90static const char mlxsw_sp_driver_version[] = "1.0";
91
92/* tx_hdr_version
93 * Tx header version.
94 * Must be set to 1.
95 */
96MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
97
98/* tx_hdr_ctl
99 * Packet control type.
100 * 0 - Ethernet control (e.g. EMADs, LACP)
101 * 1 - Ethernet data
102 */
103MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
104
105/* tx_hdr_proto
106 * Packet protocol type. Must be set to 1 (Ethernet).
107 */
108MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
109
110/* tx_hdr_rx_is_router
111 * Packet is sent from the router. Valid for data packets only.
112 */
113MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
114
115/* tx_hdr_fid_valid
116 * Indicates if the 'fid' field is valid and should be used for
117 * forwarding lookup. Valid for data packets only.
118 */
119MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
120
121/* tx_hdr_swid
122 * Switch partition ID. Must be set to 0.
123 */
124MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
125
126/* tx_hdr_control_tclass
127 * Indicates if the packet should use the control TClass and not one
128 * of the data TClasses.
129 */
130MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
131
132/* tx_hdr_etclass
133 * Egress TClass to be used on the egress device on the egress port.
134 */
135MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
136
137/* tx_hdr_port_mid
138 * Destination local port for unicast packets.
139 * Destination multicast ID for multicast packets.
140 *
141 * Control packets are directed to a specific egress port, while data
142 * packets are transmitted through the CPU port (0) into the switch partition,
143 * where forwarding rules are applied.
144 */
145MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
146
147/* tx_hdr_fid
148 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
149 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
150 * Valid for data packets only.
151 */
152MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
153
154/* tx_hdr_type
155 * 0 - Data packets
156 * 6 - Control packets
157 */
158MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
159
Yotam Gigie5e5c882017-05-23 21:56:27 +0200160struct mlxsw_sp_mlxfw_dev {
161 struct mlxfw_dev mlxfw_dev;
162 struct mlxsw_sp *mlxsw_sp;
163};
164
165static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
166 u16 component_index, u32 *p_max_size,
167 u8 *p_align_bits, u16 *p_max_write_size)
168{
169 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
170 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
171 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
172 char mcqi_pl[MLXSW_REG_MCQI_LEN];
173 int err;
174
175 mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
176 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
177 if (err)
178 return err;
179 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
180 p_max_write_size);
181
182 *p_align_bits = max_t(u8, *p_align_bits, 2);
183 *p_max_write_size = min_t(u16, *p_max_write_size,
184 MLXSW_REG_MCDA_MAX_DATA_LEN);
185 return 0;
186}
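
/* Note: the alignment reported by the firmware is raised to at least 2 bits
 * and the maximum write size is capped to MLXSW_REG_MCDA_MAX_DATA_LEN, so
 * every block handed to mlxsw_sp_fsm_block_download() fits in a single
 * MCDA register write.
 */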
187
188static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
189{
190 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
191 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
192 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
193 char mcc_pl[MLXSW_REG_MCC_LEN];
194 u8 control_state;
195 int err;
196
197 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
198 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
199 if (err)
200 return err;
201
202 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
203 if (control_state != MLXFW_FSM_STATE_IDLE)
204 return -EBUSY;
205
206 mlxsw_reg_mcc_pack(mcc_pl,
207 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
208 0, *fwhandle, 0);
209 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
210}
211
212static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
213 u32 fwhandle, u16 component_index,
214 u32 component_size)
215{
216 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
217 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
218 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
219 char mcc_pl[MLXSW_REG_MCC_LEN];
220
221 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
222 component_index, fwhandle, component_size);
223 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
224}
225
226static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
227 u32 fwhandle, u8 *data, u16 size,
228 u32 offset)
229{
230 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
231 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
232 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
233 char mcda_pl[MLXSW_REG_MCDA_LEN];
234
235 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
236 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
237}
238
239static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
240 u32 fwhandle, u16 component_index)
241{
242 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
243 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
244 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
245 char mcc_pl[MLXSW_REG_MCC_LEN];
246
247 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
248 component_index, fwhandle, 0);
249 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
250}
251
252static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
253{
254 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
255 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
256 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
257 char mcc_pl[MLXSW_REG_MCC_LEN];
258
259 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
260 fwhandle, 0);
261 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
262}
263
264static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
265 enum mlxfw_fsm_state *fsm_state,
266 enum mlxfw_fsm_state_err *fsm_state_err)
267{
268 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
269 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
270 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
271 char mcc_pl[MLXSW_REG_MCC_LEN];
272 u8 control_state;
273 u8 error_code;
274 int err;
275
276 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
277 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
278 if (err)
279 return err;
280
281 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
282 *fsm_state = control_state;
283 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
284 MLXFW_FSM_STATE_ERR_MAX);
285 return 0;
286}
287
288static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
289{
290 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
291 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
292 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
293 char mcc_pl[MLXSW_REG_MCC_LEN];
294
295 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
296 fwhandle, 0);
297 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
298}
299
300static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
301{
302 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
303 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
304 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
305 char mcc_pl[MLXSW_REG_MCC_LEN];
306
307 mlxsw_reg_mcc_pack(mcc_pl,
308 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
309 fwhandle, 0);
310 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
311}
312
313static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
314 .component_query = mlxsw_sp_component_query,
315 .fsm_lock = mlxsw_sp_fsm_lock,
316 .fsm_component_update = mlxsw_sp_fsm_component_update,
317 .fsm_block_download = mlxsw_sp_fsm_block_download,
318 .fsm_component_verify = mlxsw_sp_fsm_component_verify,
319 .fsm_activate = mlxsw_sp_fsm_activate,
320 .fsm_query_state = mlxsw_sp_fsm_query_state,
321 .fsm_cancel = mlxsw_sp_fsm_cancel,
322 .fsm_release = mlxsw_sp_fsm_release
323};
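
/* These callbacks are driven by mlxfw_firmware_flash() to walk the flashing
 * state machine: lock an update handle, then for each component in the
 * .mfa2 file update, download in blocks and verify, activate the new image
 * and finally release (or cancel) the handle.
 */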
324
Yotam Gigice6ef68f2017-06-01 16:26:46 +0300325static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
326 const struct firmware *firmware)
327{
328 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
329 .mlxfw_dev = {
330 .ops = &mlxsw_sp_mlxfw_dev_ops,
331 .psid = mlxsw_sp->bus_info->psid,
332 .psid_size = strlen(mlxsw_sp->bus_info->psid),
333 },
334 .mlxsw_sp = mlxsw_sp
335 };
336
337 return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
338}
339
Yotam Gigi6b742192017-05-23 21:56:29 +0200340static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
341 const struct mlxsw_fw_rev *b)
342{
343 if (a->major != b->major)
344 return a->major > b->major;
345 if (a->minor != b->minor)
346 return a->minor > b->minor;
347 return a->subminor >= b->subminor;
348}
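
/* The comparison is lexicographic, so e.g. 13.1500.0 is considered newer
 * than 13.1420.122 even though its subminor is lower.
 */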
349
350static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
351{
352 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
Yotam Gigi6b742192017-05-23 21:56:29 +0200353 const struct firmware *firmware;
354 int err;
355
356 if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
357 return 0;
358
 359	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
360 rev->major, rev->minor, rev->subminor);
361 dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
362 MLXSW_SP_FW_FILENAME);
363
364 err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
365 mlxsw_sp->bus_info->dev);
366 if (err) {
367 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
368 MLXSW_SP_FW_FILENAME);
369 return err;
370 }
371
Yotam Gigice6ef68f2017-06-01 16:26:46 +0300372 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
Yotam Gigi6b742192017-05-23 21:56:29 +0200373 release_firmware(firmware);
374 return err;
375}
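
/* request_firmware_direct() only probes the kernel's firmware paths and
 * does not fall back to the user-mode helper, so validation fails quickly
 * when the .mfa2 file is not installed.
 */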
376
Arkadi Sharshevsky1abcbcc2017-03-11 09:42:53 +0100377int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
378 unsigned int counter_index, u64 *packets,
379 u64 *bytes)
380{
381 char mgpc_pl[MLXSW_REG_MGPC_LEN];
382 int err;
383
384 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
385 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
386 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
387 if (err)
388 return err;
389 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
390 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
391 return 0;
392}
393
394static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
395 unsigned int counter_index)
396{
397 char mgpc_pl[MLXSW_REG_MGPC_LEN];
398
399 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
400 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
401 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
402}
403
404int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
405 unsigned int *p_counter_index)
406{
407 int err;
408
409 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
410 p_counter_index);
411 if (err)
412 return err;
413 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
414 if (err)
415 goto err_counter_clear;
416 return 0;
417
418err_counter_clear:
419 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
420 *p_counter_index);
421 return err;
422}
423
424void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
425 unsigned int counter_index)
426{
427 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
428 counter_index);
429}
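
/* Example usage (sketch only, not called from this file as-is):
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	if (!mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index)) {
 *		mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					  &packets, &bytes);
 *		mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 *	}
 *
 * The counter is cleared on allocation, so the first read returns the
 * traffic seen since mlxsw_sp_flow_counter_alloc().
 */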
430
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200431static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
432 const struct mlxsw_tx_info *tx_info)
433{
434 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
435
436 memset(txhdr, 0, MLXSW_TXHDR_LEN);
437
438 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
439 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
440 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
441 mlxsw_tx_hdr_swid_set(txhdr, 0);
442 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
443 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
444 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
445}
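
/* The Tx header is pushed in front of the frame, so callers must guarantee
 * MLXSW_TXHDR_LEN bytes of headroom; mlxsw_sp_port_xmit() reallocates the
 * headroom before calling this when the skb lacks it.
 */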
446
Ido Schimmelfe9ccc72017-05-16 19:38:31 +0200447int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
448 u8 state)
449{
450 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
451 enum mlxsw_reg_spms_state spms_state;
452 char *spms_pl;
453 int err;
454
455 switch (state) {
456 case BR_STATE_FORWARDING:
457 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
458 break;
459 case BR_STATE_LEARNING:
460 spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
461 break;
462 case BR_STATE_LISTENING: /* fall-through */
463 case BR_STATE_DISABLED: /* fall-through */
464 case BR_STATE_BLOCKING:
465 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
466 break;
467 default:
468 BUG();
469 }
470
471 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
472 if (!spms_pl)
473 return -ENOMEM;
474 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
475 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
476
477 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
478 kfree(spms_pl);
479 return err;
480}
481
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200482static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
483{
Elad Raz5b090742016-10-28 21:35:46 +0200484 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200485 int err;
486
487 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
488 if (err)
489 return err;
490 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
491 return 0;
492}
493
Yotam Gigi763b4b72016-07-21 12:03:17 +0200494static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
495{
Yotam Gigi763b4b72016-07-21 12:03:17 +0200496 int i;
497
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200498 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
Yotam Gigi763b4b72016-07-21 12:03:17 +0200499 return -EIO;
500
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200501 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
502 MAX_SPAN);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200503 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
504 sizeof(struct mlxsw_sp_span_entry),
505 GFP_KERNEL);
506 if (!mlxsw_sp->span.entries)
507 return -ENOMEM;
508
509 for (i = 0; i < mlxsw_sp->span.entries_count; i++)
510 INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
511
512 return 0;
513}
514
515static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
516{
517 int i;
518
519 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
520 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
521
522 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
523 }
524 kfree(mlxsw_sp->span.entries);
525}
526
527static struct mlxsw_sp_span_entry *
528mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
529{
530 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
531 struct mlxsw_sp_span_entry *span_entry;
532 char mpat_pl[MLXSW_REG_MPAT_LEN];
533 u8 local_port = port->local_port;
534 int index;
535 int i;
536 int err;
537
538 /* find a free entry to use */
539 index = -1;
540 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
541 if (!mlxsw_sp->span.entries[i].used) {
542 index = i;
543 span_entry = &mlxsw_sp->span.entries[i];
544 break;
545 }
546 }
547 if (index < 0)
548 return NULL;
549
 550	/* create a new port analyzer entry for local_port */
551 mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
552 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
553 if (err)
554 return NULL;
555
556 span_entry->used = true;
557 span_entry->id = index;
Yotam Gigi2d644d42016-11-11 16:34:25 +0100558 span_entry->ref_count = 1;
Yotam Gigi763b4b72016-07-21 12:03:17 +0200559 span_entry->local_port = local_port;
560 return span_entry;
561}
562
563static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
564 struct mlxsw_sp_span_entry *span_entry)
565{
566 u8 local_port = span_entry->local_port;
567 char mpat_pl[MLXSW_REG_MPAT_LEN];
568 int pa_id = span_entry->id;
569
570 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
571 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
572 span_entry->used = false;
573}
574
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200575static struct mlxsw_sp_span_entry *
576mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200577{
578 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
579 int i;
580
581 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
582 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
583
584 if (curr->used && curr->local_port == port->local_port)
585 return curr;
586 }
587 return NULL;
588}
589
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200590static struct mlxsw_sp_span_entry
591*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200592{
593 struct mlxsw_sp_span_entry *span_entry;
594
595 span_entry = mlxsw_sp_span_entry_find(port);
596 if (span_entry) {
Yotam Gigi2d644d42016-11-11 16:34:25 +0100597 /* Already exists, just take a reference */
Yotam Gigi763b4b72016-07-21 12:03:17 +0200598 span_entry->ref_count++;
599 return span_entry;
600 }
601
602 return mlxsw_sp_span_entry_create(port);
603}
604
605static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
606 struct mlxsw_sp_span_entry *span_entry)
607{
Yotam Gigi2d644d42016-11-11 16:34:25 +0100608 WARN_ON(!span_entry->ref_count);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200609 if (--span_entry->ref_count == 0)
610 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
611 return 0;
612}
613
614static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
615{
616 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
617 struct mlxsw_sp_span_inspected_port *p;
618 int i;
619
620 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
621 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
622
623 list_for_each_entry(p, &curr->bound_ports_list, list)
624 if (p->local_port == port->local_port &&
625 p->type == MLXSW_SP_SPAN_EGRESS)
626 return true;
627 }
628
629 return false;
630}
631
Ido Schimmel18281f22017-03-24 08:02:51 +0100632static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
633 int mtu)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200634{
Ido Schimmel18281f22017-03-24 08:02:51 +0100635 return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
Yotam Gigi763b4b72016-07-21 12:03:17 +0200636}
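
/* The shared buffer reserved for an egress mirror is sized to 2.5 times the
 * MTU, converted to cells, plus one extra cell (presumably to absorb
 * rounding).
 */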
637
638static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
639{
640 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
641 char sbib_pl[MLXSW_REG_SBIB_LEN];
642 int err;
643
 644	/* If the port is egress mirrored, the shared buffer size should be
 645	 * updated according to the MTU value
646 */
647 if (mlxsw_sp_span_is_egress_mirror(port)) {
Ido Schimmel18281f22017-03-24 08:02:51 +0100648 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
649
650 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200651 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
652 if (err) {
653 netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
654 return err;
655 }
656 }
657
658 return 0;
659}
660
661static struct mlxsw_sp_span_inspected_port *
662mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
663 struct mlxsw_sp_span_entry *span_entry)
664{
665 struct mlxsw_sp_span_inspected_port *p;
666
667 list_for_each_entry(p, &span_entry->bound_ports_list, list)
668 if (port->local_port == p->local_port)
669 return p;
670 return NULL;
671}
672
673static int
674mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
675 struct mlxsw_sp_span_entry *span_entry,
676 enum mlxsw_sp_span_type type)
677{
678 struct mlxsw_sp_span_inspected_port *inspected_port;
679 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
680 char mpar_pl[MLXSW_REG_MPAR_LEN];
681 char sbib_pl[MLXSW_REG_SBIB_LEN];
682 int pa_id = span_entry->id;
683 int err;
684
685 /* if it is an egress SPAN, bind a shared buffer to it */
686 if (type == MLXSW_SP_SPAN_EGRESS) {
Ido Schimmel18281f22017-03-24 08:02:51 +0100687 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
688 port->dev->mtu);
689
690 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200691 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
692 if (err) {
693 netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
694 return err;
695 }
696 }
697
698 /* bind the port to the SPAN entry */
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200699 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
700 (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200701 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
702 if (err)
703 goto err_mpar_reg_write;
704
705 inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
706 if (!inspected_port) {
707 err = -ENOMEM;
708 goto err_inspected_port_alloc;
709 }
710 inspected_port->local_port = port->local_port;
711 inspected_port->type = type;
712 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
713
714 return 0;
715
716err_mpar_reg_write:
717err_inspected_port_alloc:
718 if (type == MLXSW_SP_SPAN_EGRESS) {
719 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
720 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
721 }
722 return err;
723}
724
725static void
726mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
727 struct mlxsw_sp_span_entry *span_entry,
728 enum mlxsw_sp_span_type type)
729{
730 struct mlxsw_sp_span_inspected_port *inspected_port;
731 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
732 char mpar_pl[MLXSW_REG_MPAR_LEN];
733 char sbib_pl[MLXSW_REG_SBIB_LEN];
734 int pa_id = span_entry->id;
735
736 inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
737 if (!inspected_port)
738 return;
739
740 /* remove the inspected port */
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200741 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
742 (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200743 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
744
745 /* remove the SBIB buffer if it was egress SPAN */
746 if (type == MLXSW_SP_SPAN_EGRESS) {
747 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
748 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
749 }
750
751 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
752
753 list_del(&inspected_port->list);
754 kfree(inspected_port);
755}
756
757static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
758 struct mlxsw_sp_port *to,
759 enum mlxsw_sp_span_type type)
760{
761 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
762 struct mlxsw_sp_span_entry *span_entry;
763 int err;
764
765 span_entry = mlxsw_sp_span_entry_get(to);
766 if (!span_entry)
767 return -ENOENT;
768
769 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
770 span_entry->id);
771
772 err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
773 if (err)
774 goto err_port_bind;
775
776 return 0;
777
778err_port_bind:
779 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
780 return err;
781}
782
783static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
784 struct mlxsw_sp_port *to,
785 enum mlxsw_sp_span_type type)
786{
787 struct mlxsw_sp_span_entry *span_entry;
788
789 span_entry = mlxsw_sp_span_entry_find(to);
790 if (!span_entry) {
791 netdev_err(from->dev, "no span entry found\n");
792 return;
793 }
794
795 netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
796 span_entry->id);
797 mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
798}
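
/* mlxsw_sp_span_mirror_add() looks up or creates the SPAN entry of the 'to'
 * port and binds 'from' to it as an inspected port;
 * mlxsw_sp_span_mirror_remove() unbinds it again, and the entry itself is
 * destroyed once its last reference is dropped.
 */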
799
Yotam Gigi98d0f7b2017-01-23 11:07:11 +0100800static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
801 bool enable, u32 rate)
802{
803 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
804 char mpsc_pl[MLXSW_REG_MPSC_LEN];
805
806 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
807 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
808}
809
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200810static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
811 bool is_up)
812{
813 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
814 char paos_pl[MLXSW_REG_PAOS_LEN];
815
816 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
817 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
818 MLXSW_PORT_ADMIN_STATUS_DOWN);
819 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
820}
821
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200822static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
823 unsigned char *addr)
824{
825 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
826 char ppad_pl[MLXSW_REG_PPAD_LEN];
827
828 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
829 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
830 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
831}
832
833static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
834{
835 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
836 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
837
838 ether_addr_copy(addr, mlxsw_sp->base_mac);
839 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
840 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
841}
842
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200843static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
844{
845 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
846 char pmtu_pl[MLXSW_REG_PMTU_LEN];
847 int max_mtu;
848 int err;
849
850 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
851 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
852 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
853 if (err)
854 return err;
855 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
856
857 if (mtu > max_mtu)
858 return -EINVAL;
859
860 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
861 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
862}
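
/* The MTU programmed via PMTU includes the Tx header and the Ethernet
 * header, which is why the netdev MTU is padded before being validated
 * against the port's maximum.
 */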
863
864static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
865{
866 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel5b153852017-06-08 08:47:44 +0200867 char pspa_pl[MLXSW_REG_PSPA_LEN];
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200868
Ido Schimmel5b153852017-06-08 08:47:44 +0200869 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
870 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200871}
872
Ido Schimmela1107482017-05-26 08:37:39 +0200873int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200874{
875 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
876 char svpe_pl[MLXSW_REG_SVPE_LEN];
877
878 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
879 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
880}
881
Ido Schimmel7cbc4272017-05-16 19:38:33 +0200882int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
883 bool learn_enable)
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200884{
885 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
886 char *spvmlr_pl;
887 int err;
888
889 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
890 if (!spvmlr_pl)
891 return -ENOMEM;
Ido Schimmel7cbc4272017-05-16 19:38:33 +0200892 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
893 learn_enable);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200894 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
895 kfree(spvmlr_pl);
896 return err;
897}
898
Ido Schimmelb02eae92017-05-16 19:38:34 +0200899static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
900 u16 vid)
901{
902 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
903 char spvid_pl[MLXSW_REG_SPVID_LEN];
904
905 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
906 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
907}
908
909static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
910 bool allow)
911{
912 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
913 char spaft_pl[MLXSW_REG_SPAFT_LEN];
914
915 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
916 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
917}
918
919int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
920{
921 int err;
922
923 if (!vid) {
924 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
925 if (err)
926 return err;
927 } else {
928 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
929 if (err)
930 return err;
931 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
932 if (err)
933 goto err_port_allow_untagged_set;
934 }
935
936 mlxsw_sp_port->pvid = vid;
937 return 0;
938
939err_port_allow_untagged_set:
940 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
941 return err;
942}
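
/* A PVID of 0 is special: instead of mapping untagged traffic to a VLAN,
 * untagged admission is simply disabled on the port via SPAFT, while a
 * non-zero PVID programs SPVID and re-enables untagged packets.
 */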
943
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200944static int
945mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
946{
947 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
948 char sspr_pl[MLXSW_REG_SSPR_LEN];
949
950 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
951 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
952}
953
Ido Schimmeld664b412016-06-09 09:51:40 +0200954static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
955 u8 local_port, u8 *p_module,
956 u8 *p_width, u8 *p_lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200957{
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200958 char pmlp_pl[MLXSW_REG_PMLP_LEN];
959 int err;
960
Ido Schimmel558c2d52016-02-26 17:32:29 +0100961 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200962 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
963 if (err)
964 return err;
Ido Schimmel558c2d52016-02-26 17:32:29 +0100965 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
966 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
Ido Schimmel2bf9a582016-04-05 10:20:04 +0200967 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200968 return 0;
969}
970
Ido Schimmel2e915e02017-06-08 08:47:45 +0200971static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
Ido Schimmel18f1e702016-02-26 17:32:31 +0100972 u8 module, u8 width, u8 lane)
973{
Ido Schimmel2e915e02017-06-08 08:47:45 +0200974 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel18f1e702016-02-26 17:32:31 +0100975 char pmlp_pl[MLXSW_REG_PMLP_LEN];
976 int i;
977
Ido Schimmel2e915e02017-06-08 08:47:45 +0200978 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
Ido Schimmel18f1e702016-02-26 17:32:31 +0100979 mlxsw_reg_pmlp_width_set(pmlp_pl, width);
980 for (i = 0; i < width; i++) {
981 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
982 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
983 }
984
985 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
986}
987
Ido Schimmel2e915e02017-06-08 08:47:45 +0200988static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
Ido Schimmel3e9b27b2016-02-26 17:32:28 +0100989{
Ido Schimmel2e915e02017-06-08 08:47:45 +0200990 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel3e9b27b2016-02-26 17:32:28 +0100991 char pmlp_pl[MLXSW_REG_PMLP_LEN];
992
Ido Schimmel2e915e02017-06-08 08:47:45 +0200993 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
Ido Schimmel3e9b27b2016-02-26 17:32:28 +0100994 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
995 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
996}
997
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200998static int mlxsw_sp_port_open(struct net_device *dev)
999{
1000 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1001 int err;
1002
1003 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1004 if (err)
1005 return err;
1006 netif_start_queue(dev);
1007 return 0;
1008}
1009
1010static int mlxsw_sp_port_stop(struct net_device *dev)
1011{
1012 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1013
1014 netif_stop_queue(dev);
1015 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1016}
1017
1018static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
1019 struct net_device *dev)
1020{
1021 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1022 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1023 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1024 const struct mlxsw_tx_info tx_info = {
1025 .local_port = mlxsw_sp_port->local_port,
1026 .is_emad = false,
1027 };
1028 u64 len;
1029 int err;
1030
Jiri Pirko307c2432016-04-08 19:11:22 +02001031 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001032 return NETDEV_TX_BUSY;
1033
1034 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
1035 struct sk_buff *skb_orig = skb;
1036
1037 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
1038 if (!skb) {
1039 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1040 dev_kfree_skb_any(skb_orig);
1041 return NETDEV_TX_OK;
1042 }
Arkadi Sharshevsky36bf38d2017-01-12 09:10:37 +01001043 dev_consume_skb_any(skb_orig);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001044 }
1045
1046 if (eth_skb_pad(skb)) {
1047 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1048 return NETDEV_TX_OK;
1049 }
1050
1051 mlxsw_sp_txhdr_construct(skb, &tx_info);
Nogah Frankel63dcdd32016-06-17 15:09:05 +02001052 /* TX header is consumed by HW on the way so we shouldn't count its
1053 * bytes as being sent.
1054 */
1055 len = skb->len - MLXSW_TXHDR_LEN;
1056
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001057 /* Due to a race we might fail here because of a full queue. In that
1058 * unlikely case we simply drop the packet.
1059 */
Jiri Pirko307c2432016-04-08 19:11:22 +02001060 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001061
1062 if (!err) {
1063 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1064 u64_stats_update_begin(&pcpu_stats->syncp);
1065 pcpu_stats->tx_packets++;
1066 pcpu_stats->tx_bytes += len;
1067 u64_stats_update_end(&pcpu_stats->syncp);
1068 } else {
1069 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1070 dev_kfree_skb_any(skb);
1071 }
1072 return NETDEV_TX_OK;
1073}
1074
Jiri Pirkoc5b9b512015-12-03 12:12:22 +01001075static void mlxsw_sp_set_rx_mode(struct net_device *dev)
1076{
1077}
1078
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001079static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
1080{
1081 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1082 struct sockaddr *addr = p;
1083 int err;
1084
1085 if (!is_valid_ether_addr(addr->sa_data))
1086 return -EADDRNOTAVAIL;
1087
1088 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
1089 if (err)
1090 return err;
1091 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1092 return 0;
1093}
1094
Ido Schimmel18281f22017-03-24 08:02:51 +01001095static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
1096 int mtu)
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001097{
Ido Schimmel18281f22017-03-24 08:02:51 +01001098 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
Ido Schimmelf417f042017-03-24 08:02:50 +01001099}
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001100
Ido Schimmelf417f042017-03-24 08:02:50 +01001101#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
Ido Schimmel18281f22017-03-24 08:02:51 +01001102
1103static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1104 u16 delay)
Ido Schimmelf417f042017-03-24 08:02:50 +01001105{
Ido Schimmel18281f22017-03-24 08:02:51 +01001106 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
1107 BITS_PER_BYTE));
1108 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
1109 mtu);
Ido Schimmelf417f042017-03-24 08:02:50 +01001110}
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001111
Ido Schimmel18281f22017-03-24 08:02:51 +01001112/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
Ido Schimmelf417f042017-03-24 08:02:50 +01001113 * Assumes 100m cable and maximum MTU.
1114 */
Ido Schimmel18281f22017-03-24 08:02:51 +01001115#define MLXSW_SP_PAUSE_DELAY 58752
1116
1117static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1118 u16 delay, bool pfc, bool pause)
Ido Schimmelf417f042017-03-24 08:02:50 +01001119{
1120 if (pfc)
Ido Schimmel18281f22017-03-24 08:02:51 +01001121 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
Ido Schimmelf417f042017-03-24 08:02:50 +01001122 else if (pause)
Ido Schimmel18281f22017-03-24 08:02:51 +01001123 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001124 else
Ido Schimmelf417f042017-03-24 08:02:50 +01001125 return 0;
1126}
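
/* For PFC the peer-advertised delay (given in bits) is converted to cells
 * and scaled by MLXSW_SP_CELL_FACTOR on top of one MTU worth of cells; for
 * plain pause a fixed worst-case allowance of MLXSW_SP_PAUSE_DELAY bytes is
 * used; lossy priority groups get no delay allowance at all.
 */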
1127
1128static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
1129 bool lossy)
1130{
1131 if (lossy)
1132 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
1133 else
1134 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
1135 thres);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001136}
1137
1138int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001139 u8 *prio_tc, bool pause_en,
1140 struct ieee_pfc *my_pfc)
Ido Schimmelff6551e2016-04-06 17:10:03 +02001141{
1142 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001143 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
1144 u16 delay = !!my_pfc ? my_pfc->delay : 0;
Ido Schimmelff6551e2016-04-06 17:10:03 +02001145 char pbmc_pl[MLXSW_REG_PBMC_LEN];
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001146 int i, j, err;
Ido Schimmelff6551e2016-04-06 17:10:03 +02001147
1148 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
1149 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1150 if (err)
1151 return err;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001152
1153 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1154 bool configure = false;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001155 bool pfc = false;
Ido Schimmelf417f042017-03-24 08:02:50 +01001156 bool lossy;
1157 u16 thres;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001158
1159 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
1160 if (prio_tc[j] == i) {
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001161 pfc = pfc_en & BIT(j);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001162 configure = true;
1163 break;
1164 }
1165 }
1166
1167 if (!configure)
1168 continue;
Ido Schimmelf417f042017-03-24 08:02:50 +01001169
1170 lossy = !(pfc || pause_en);
Ido Schimmel18281f22017-03-24 08:02:51 +01001171 thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
1172 delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
1173 pause_en);
Ido Schimmelf417f042017-03-24 08:02:50 +01001174 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001175 }
1176
Ido Schimmelff6551e2016-04-06 17:10:03 +02001177 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1178}
1179
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001180static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001181 int mtu, bool pause_en)
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001182{
1183 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
1184 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001185 struct ieee_pfc *my_pfc;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001186 u8 *prio_tc;
1187
1188 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001189 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001190
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001191 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001192 pause_en, my_pfc);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001193}
1194
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001195static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
1196{
1197 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001198 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001199 int err;
1200
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001201 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001202 if (err)
1203 return err;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001204 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
1205 if (err)
1206 goto err_span_port_mtu_update;
Ido Schimmelff6551e2016-04-06 17:10:03 +02001207 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
1208 if (err)
1209 goto err_port_mtu_set;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001210 dev->mtu = mtu;
1211 return 0;
Ido Schimmelff6551e2016-04-06 17:10:03 +02001212
1213err_port_mtu_set:
Yotam Gigi763b4b72016-07-21 12:03:17 +02001214 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
1215err_span_port_mtu_update:
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001216 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
Ido Schimmelff6551e2016-04-06 17:10:03 +02001217 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001218}
1219
Or Gerlitz4bdcc6c2016-09-20 08:14:08 +03001220static int
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001221mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
1222 struct rtnl_link_stats64 *stats)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001223{
1224 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1225 struct mlxsw_sp_port_pcpu_stats *p;
1226 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1227 u32 tx_dropped = 0;
1228 unsigned int start;
1229 int i;
1230
1231 for_each_possible_cpu(i) {
1232 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
1233 do {
1234 start = u64_stats_fetch_begin_irq(&p->syncp);
1235 rx_packets = p->rx_packets;
1236 rx_bytes = p->rx_bytes;
1237 tx_packets = p->tx_packets;
1238 tx_bytes = p->tx_bytes;
1239 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1240
1241 stats->rx_packets += rx_packets;
1242 stats->rx_bytes += rx_bytes;
1243 stats->tx_packets += tx_packets;
1244 stats->tx_bytes += tx_bytes;
1245 /* tx_dropped is u32, updated without syncp protection. */
1246 tx_dropped += p->tx_dropped;
1247 }
1248 stats->tx_dropped = tx_dropped;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001249 return 0;
1250}
1251
Or Gerlitz3df5b3c2016-11-22 23:09:54 +02001252static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001253{
1254 switch (attr_id) {
1255 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1256 return true;
1257 }
1258
1259 return false;
1260}
1261
Or Gerlitz4bdcc6c2016-09-20 08:14:08 +03001262static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1263 void *sp)
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001264{
1265 switch (attr_id) {
1266 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1267 return mlxsw_sp_port_get_sw_stats64(dev, sp);
1268 }
1269
1270 return -EINVAL;
1271}
1272
1273static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
1274 int prio, char *ppcnt_pl)
1275{
1276 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1277 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1278
1279 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1280 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1281}
1282
1283static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1284 struct rtnl_link_stats64 *stats)
1285{
1286 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1287 int err;
1288
1289 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1290 0, ppcnt_pl);
1291 if (err)
1292 goto out;
1293
1294 stats->tx_packets =
1295 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1296 stats->rx_packets =
1297 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1298 stats->tx_bytes =
1299 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1300 stats->rx_bytes =
1301 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1302 stats->multicast =
1303 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1304
1305 stats->rx_crc_errors =
1306 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1307 stats->rx_frame_errors =
1308 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1309
1310 stats->rx_length_errors = (
1311 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1312 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1313 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1314
1315 stats->rx_errors = (stats->rx_crc_errors +
1316 stats->rx_frame_errors + stats->rx_length_errors);
1317
1318out:
1319 return err;
1320}
1321
1322static void update_stats_cache(struct work_struct *work)
1323{
1324 struct mlxsw_sp_port *mlxsw_sp_port =
1325 container_of(work, struct mlxsw_sp_port,
1326 hw_stats.update_dw.work);
1327
1328 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1329 goto out;
1330
1331 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1332 mlxsw_sp_port->hw_stats.cache);
1333
1334out:
1335 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1336 MLXSW_HW_STATS_UPDATE_TIME);
1337}
1338
1339/* Return the stats from a cache that is updated periodically,
1340 * as this function might get called in an atomic context.
1341 */
stephen hemmingerbc1f4472017-01-06 19:12:52 -08001342static void
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001343mlxsw_sp_port_get_stats64(struct net_device *dev,
1344 struct rtnl_link_stats64 *stats)
1345{
1346 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1347
1348 memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001349}
1350
Jiri Pirko93cd0812017-04-18 16:55:35 +02001351static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1352 u16 vid_begin, u16 vid_end,
1353 bool is_member, bool untagged)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001354{
1355 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1356 char *spvm_pl;
1357 int err;
1358
1359 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1360 if (!spvm_pl)
1361 return -ENOMEM;
1362
1363 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1364 vid_end, is_member, untagged);
1365 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1366 kfree(spvm_pl);
1367 return err;
1368}
1369
Jiri Pirko93cd0812017-04-18 16:55:35 +02001370int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1371 u16 vid_end, bool is_member, bool untagged)
1372{
1373 u16 vid, vid_e;
1374 int err;
1375
1376 for (vid = vid_begin; vid <= vid_end;
1377 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1378 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1379 vid_end);
1380
1381 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1382 is_member, untagged);
1383 if (err)
1384 return err;
1385 }
1386
1387 return 0;
1388}
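
/* VID ranges wider than a single SPVM register can describe are programmed
 * in chunks of MLXSW_REG_SPVM_REC_MAX_COUNT VIDs per write.
 */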
1389
Ido Schimmelc57529e2017-05-26 08:37:31 +02001390static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001391{
Ido Schimmelc57529e2017-05-26 08:37:31 +02001392 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001393
Ido Schimmelc57529e2017-05-26 08:37:31 +02001394 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1395 &mlxsw_sp_port->vlans_list, list)
1396 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001397}
1398
Ido Schimmel31a08a52017-05-26 08:37:26 +02001399static struct mlxsw_sp_port_vlan *
1400mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1401{
1402 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmelc57529e2017-05-26 08:37:31 +02001403 bool untagged = vid == 1;
1404 int err;
1405
1406 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1407 if (err)
1408 return ERR_PTR(err);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001409
1410 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001411 if (!mlxsw_sp_port_vlan) {
1412 err = -ENOMEM;
1413 goto err_port_vlan_alloc;
1414 }
Ido Schimmel31a08a52017-05-26 08:37:26 +02001415
1416 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1417 mlxsw_sp_port_vlan->vid = vid;
1418 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1419
1420 return mlxsw_sp_port_vlan;
Ido Schimmelc57529e2017-05-26 08:37:31 +02001421
1422err_port_vlan_alloc:
1423 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1424 return ERR_PTR(err);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001425}
1426
1427static void
1428mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1429{
Ido Schimmelc57529e2017-05-26 08:37:31 +02001430 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1431 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02001432
Ido Schimmel31a08a52017-05-26 08:37:26 +02001433 list_del(&mlxsw_sp_port_vlan->list);
1434 kfree(mlxsw_sp_port_vlan);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001435 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1436}
1437
1438struct mlxsw_sp_port_vlan *
1439mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1440{
1441 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1442
1443 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1444 if (mlxsw_sp_port_vlan)
1445 return mlxsw_sp_port_vlan;
1446
1447 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1448}
1449
1450void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1451{
Ido Schimmela1107482017-05-26 08:37:39 +02001452 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1453
Ido Schimmelc57529e2017-05-26 08:37:31 +02001454 if (mlxsw_sp_port_vlan->bridge_port)
1455 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
Ido Schimmela1107482017-05-26 08:37:39 +02001456 else if (fid)
1457 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001458
1459 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001460}
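
/* mlxsw_sp_port_vlan_get() returns the existing {port, VID} entry when one
 * is already on the port's vlans_list and creates it otherwise;
 * mlxsw_sp_port_vlan_put() first detaches the entry from its bridge port or
 * router FID and then destroys it, removing the VID from the port's VLAN
 * filter.
 */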
1461
Ido Schimmel05978482016-08-17 16:39:30 +02001462static int mlxsw_sp_port_add_vid(struct net_device *dev,
1463 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001464{
1465 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001466
1467 /* VLAN 0 is added to HW filter when device goes up, but it is
1468 * reserved in our case, so simply return.
1469 */
1470 if (!vid)
1471 return 0;
1472
Ido Schimmelc57529e2017-05-26 08:37:31 +02001473 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001474}
1475
Ido Schimmel32d863f2016-07-02 11:00:10 +02001476static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1477 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001478{
1479 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001480 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001481
1482 /* VLAN 0 is removed from HW filter when device goes down, but
1483 * it is reserved in our case, so simply return.
1484 */
1485 if (!vid)
1486 return 0;
1487
Ido Schimmel31a08a52017-05-26 08:37:26 +02001488 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001489 if (!mlxsw_sp_port_vlan)
Ido Schimmel31a08a52017-05-26 08:37:26 +02001490 return 0;
Ido Schimmelc57529e2017-05-26 08:37:31 +02001491 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001492
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001493 return 0;
1494}
1495
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001496static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1497 size_t len)
1498{
1499 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmeld664b412016-06-09 09:51:40 +02001500 u8 module = mlxsw_sp_port->mapping.module;
1501 u8 width = mlxsw_sp_port->mapping.width;
1502 u8 lane = mlxsw_sp_port->mapping.lane;
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001503 int err;
1504
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001505 if (!mlxsw_sp_port->split)
1506 err = snprintf(name, len, "p%d", module + 1);
1507 else
1508 err = snprintf(name, len, "p%ds%d", module + 1,
1509 lane / width);
1510
1511 if (err >= len)
1512 return -EINVAL;
1513
1514 return 0;
1515}
1516
Yotam Gigi763b4b72016-07-21 12:03:17 +02001517static struct mlxsw_sp_port_mall_tc_entry *
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001518mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
 1519				 unsigned long cookie)
{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001520 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1521
1522 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1523 if (mall_tc_entry->cookie == cookie)
1524 return mall_tc_entry;
1525
1526 return NULL;
1527}
1528
1529static int
1530mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001531 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001532 const struct tc_action *a,
1533 bool ingress)
1534{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001535 struct net *net = dev_net(mlxsw_sp_port->dev);
1536 enum mlxsw_sp_span_type span_type;
1537 struct mlxsw_sp_port *to_port;
1538 struct net_device *to_dev;
1539 int ifindex;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001540
1541 ifindex = tcf_mirred_ifindex(a);
1542 to_dev = __dev_get_by_index(net, ifindex);
1543 if (!to_dev) {
1544 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1545 return -EINVAL;
1546 }
1547
1548 if (!mlxsw_sp_port_dev_check(to_dev)) {
 1549		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
Yotam Gigie915ac62017-01-09 11:25:48 +01001550 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001551 }
1552 to_port = netdev_priv(to_dev);
1553
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001554 mirror->to_local_port = to_port->local_port;
1555 mirror->ingress = ingress;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001556 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001557 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1558}
Yotam Gigi763b4b72016-07-21 12:03:17 +02001559
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001560static void
1561mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1562 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1563{
1564 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1565 enum mlxsw_sp_span_type span_type;
1566 struct mlxsw_sp_port *to_port;
1567
1568 to_port = mlxsw_sp->ports[mirror->to_local_port];
1569 span_type = mirror->ingress ?
1570 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1571 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001572}
1573
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001574static int
1575mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1576 struct tc_cls_matchall_offload *cls,
1577 const struct tc_action *a,
1578 bool ingress)
1579{
1580 int err;
1581
1582 if (!mlxsw_sp_port->sample)
1583 return -EOPNOTSUPP;
1584 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1585 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1586 return -EEXIST;
1587 }
1588 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1589 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1590 return -EOPNOTSUPP;
1591 }
1592
1593 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1594 tcf_sample_psample_group(a));
1595 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1596 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1597 mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1598
1599 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1600 if (err)
1601 goto err_port_sample_set;
1602 return 0;
1603
1604err_port_sample_set:
1605 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1606 return err;
1607}
1608
1609static void
1610mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1611{
1612 if (!mlxsw_sp_port->sample)
1613 return;
1614
1615 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1616 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1617}
1618
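/* Offload a matchall classifier. Only a single action is accepted, and
 * only mirror and sample actions on protocol ETH_P_ALL are supported.
 */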
Yotam Gigi763b4b72016-07-21 12:03:17 +02001619static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001620 struct tc_cls_matchall_offload *f,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001621 bool ingress)
1622{
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001623 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001624 __be16 protocol = f->common.protocol;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001625 const struct tc_action *a;
WANG Cong22dc13c2016-08-13 22:35:00 -07001626 LIST_HEAD(actions);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001627 int err;
1628
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001629 if (!tcf_exts_has_one_action(f->exts)) {
Yotam Gigi763b4b72016-07-21 12:03:17 +02001630 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
Yotam Gigie915ac62017-01-09 11:25:48 +01001631 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001632 }
1633
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001634 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1635 if (!mall_tc_entry)
1636 return -ENOMEM;
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001637 mall_tc_entry->cookie = f->cookie;
Ido Schimmel86cb13e2016-07-25 13:12:33 +03001638
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001639 tcf_exts_to_list(f->exts, &actions);
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001640 a = list_first_entry(&actions, struct tc_action, list);
1641
1642 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1643 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1644
1645 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1646 mirror = &mall_tc_entry->mirror;
1647 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1648 mirror, a, ingress);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001649 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1650 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001651 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001652 a, ingress);
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001653 } else {
1654 err = -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001655 }
1656
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001657 if (err)
1658 goto err_add_action;
1659
1660 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001661 return 0;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001662
1663err_add_action:
1664 kfree(mall_tc_entry);
1665 return err;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001666}
1667
1668static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001669 struct tc_cls_matchall_offload *f)
Yotam Gigi763b4b72016-07-21 12:03:17 +02001670{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001671 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001672
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001673 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001674 f->cookie);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001675 if (!mall_tc_entry) {
1676 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1677 return;
1678 }
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001679 list_del(&mall_tc_entry->list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001680
1681 switch (mall_tc_entry->type) {
1682 case MLXSW_SP_PORT_MALL_MIRROR:
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001683 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1684 &mall_tc_entry->mirror);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001685 break;
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001686 case MLXSW_SP_PORT_MALL_SAMPLE:
1687 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1688 break;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001689 default:
1690 WARN_ON(1);
1691 }
1692
Yotam Gigi763b4b72016-07-21 12:03:17 +02001693 kfree(mall_tc_entry);
1694}
1695
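/* Dispatch matchall offload commands. The direction (ingress/egress) is
 * derived from the classifier handle; classifiers on a non-zero chain
 * are rejected.
 */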
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001696static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001697 struct tc_cls_matchall_offload *f)
Yotam Gigi763b4b72016-07-21 12:03:17 +02001698{
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001699 bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001700
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001701 if (f->common.chain_index)
Jiri Pirkoa5fcf8a2017-06-06 17:00:16 +02001702 return -EOPNOTSUPP;
1703
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001704 switch (f->command) {
1705 case TC_CLSMATCHALL_REPLACE:
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001706 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001707 ingress);
1708 case TC_CLSMATCHALL_DESTROY:
1709 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1710 return 0;
1711 default:
1712 return -EOPNOTSUPP;
1713 }
1714}
1715
1716static int
1717mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001718 struct tc_cls_flower_offload *f)
1719{
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001720 bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS);
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001721
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001722 if (f->common.chain_index)
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001723 return -EOPNOTSUPP;
1724
1725 switch (f->command) {
1726 case TC_CLSFLOWER_REPLACE:
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001727 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001728 case TC_CLSFLOWER_DESTROY:
1729 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
1730 return 0;
1731 case TC_CLSFLOWER_STATS:
1732 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
1733 default:
1734 return -EOPNOTSUPP;
1735 }
1736}
1737
1738static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02001739 void *type_data)
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001740{
1741 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1742
Jiri Pirko2572ac52017-08-07 10:15:17 +02001743 switch (type) {
Jiri Pirkoade9b652017-08-07 10:15:18 +02001744 case TC_SETUP_CLSMATCHALL:
Jiri Pirkode4784c2017-08-07 10:15:32 +02001745 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
Jiri Pirko7aa0f5a2017-02-03 10:29:09 +01001746 case TC_SETUP_CLSFLOWER:
Jiri Pirkode4784c2017-08-07 10:15:32 +02001747 return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
Jiri Pirko2572ac52017-08-07 10:15:17 +02001748 default:
1749 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001750 }
Yotam Gigi763b4b72016-07-21 12:03:17 +02001751}
1752
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001753static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1754 .ndo_open = mlxsw_sp_port_open,
1755 .ndo_stop = mlxsw_sp_port_stop,
1756 .ndo_start_xmit = mlxsw_sp_port_xmit,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001757 .ndo_setup_tc = mlxsw_sp_setup_tc,
Jiri Pirkoc5b9b512015-12-03 12:12:22 +01001758 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001759 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1760 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1761 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001762 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1763 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001764 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1765 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001766 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001767};
1768
1769static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1770 struct ethtool_drvinfo *drvinfo)
1771{
1772 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1773 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1774
1775 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1776 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1777 sizeof(drvinfo->version));
1778 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1779 "%d.%d.%d",
1780 mlxsw_sp->bus_info->fw_rev.major,
1781 mlxsw_sp->bus_info->fw_rev.minor,
1782 mlxsw_sp->bus_info->fw_rev.subminor);
1783 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1784 sizeof(drvinfo->bus_info));
1785}
1786
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001787static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1788 struct ethtool_pauseparam *pause)
1789{
1790 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1791
1792 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1793 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1794}
1795
1796static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1797 struct ethtool_pauseparam *pause)
1798{
1799 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1800
1801 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1802 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1803 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1804
1805 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1806 pfcc_pl);
1807}
1808
1809static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1810 struct ethtool_pauseparam *pause)
1811{
1812 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1813 bool pause_en = pause->tx_pause || pause->rx_pause;
1814 int err;
1815
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001816 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1817 netdev_err(dev, "PFC already enabled on port\n");
1818 return -EINVAL;
1819 }
1820
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001821 if (pause->autoneg) {
1822 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1823 return -EINVAL;
1824 }
1825
1826 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1827 if (err) {
1828 netdev_err(dev, "Failed to configure port's headroom\n");
1829 return err;
1830 }
1831
1832 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1833 if (err) {
1834 netdev_err(dev, "Failed to set PAUSE parameters\n");
1835 goto err_port_pause_configure;
1836 }
1837
1838 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1839 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1840
1841 return 0;
1842
1843err_port_pause_configure:
1844 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1845 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1846 return err;
1847}
1848
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001849struct mlxsw_sp_port_hw_stats {
1850 char str[ETH_GSTRING_LEN];
Jiri Pirko412791d2016-10-21 16:07:19 +02001851 u64 (*getter)(const char *payload);
Ido Schimmel18281f22017-03-24 08:02:51 +01001852 bool cells_bytes;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001853};
1854
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001855static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001856 {
1857 .str = "a_frames_transmitted_ok",
1858 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1859 },
1860 {
1861 .str = "a_frames_received_ok",
1862 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1863 },
1864 {
1865 .str = "a_frame_check_sequence_errors",
1866 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1867 },
1868 {
1869 .str = "a_alignment_errors",
1870 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1871 },
1872 {
1873 .str = "a_octets_transmitted_ok",
1874 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1875 },
1876 {
1877 .str = "a_octets_received_ok",
1878 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1879 },
1880 {
1881 .str = "a_multicast_frames_xmitted_ok",
1882 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1883 },
1884 {
1885 .str = "a_broadcast_frames_xmitted_ok",
1886 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1887 },
1888 {
1889 .str = "a_multicast_frames_received_ok",
1890 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1891 },
1892 {
1893 .str = "a_broadcast_frames_received_ok",
1894 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1895 },
1896 {
1897 .str = "a_in_range_length_errors",
1898 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1899 },
1900 {
1901 .str = "a_out_of_range_length_field",
1902 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1903 },
1904 {
1905 .str = "a_frame_too_long_errors",
1906 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1907 },
1908 {
1909 .str = "a_symbol_error_during_carrier",
1910 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1911 },
1912 {
1913 .str = "a_mac_control_frames_transmitted",
1914 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1915 },
1916 {
1917 .str = "a_mac_control_frames_received",
1918 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1919 },
1920 {
1921 .str = "a_unsupported_opcodes_received",
1922 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1923 },
1924 {
1925 .str = "a_pause_mac_ctrl_frames_received",
1926 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1927 },
1928 {
1929 .str = "a_pause_mac_ctrl_frames_xmitted",
1930 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1931 },
1932};
1933
1934#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1935
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001936static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1937 {
1938 .str = "rx_octets_prio",
1939 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1940 },
1941 {
1942 .str = "rx_frames_prio",
1943 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1944 },
1945 {
1946 .str = "tx_octets_prio",
1947 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1948 },
1949 {
1950 .str = "tx_frames_prio",
1951 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1952 },
1953 {
1954 .str = "rx_pause_prio",
1955 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1956 },
1957 {
1958 .str = "rx_pause_duration_prio",
1959 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1960 },
1961 {
1962 .str = "tx_pause_prio",
1963 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1964 },
1965 {
1966 .str = "tx_pause_duration_prio",
1967 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1968 },
1969};
1970
1971#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1972
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001973static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1974 {
1975 .str = "tc_transmit_queue_tc",
Ido Schimmel18281f22017-03-24 08:02:51 +01001976 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
1977 .cells_bytes = true,
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001978 },
1979 {
1980 .str = "tc_no_buffer_discard_uc_tc",
1981 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1982 },
1983};
1984
1985#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1986
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001987#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001988 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1989 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001990 IEEE_8021QAZ_MAX_TCS)
1991
1992static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1993{
1994 int i;
1995
1996 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1997 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1998 mlxsw_sp_port_hw_prio_stats[i].str, prio);
1999 *p += ETH_GSTRING_LEN;
2000 }
2001}
2002
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002003static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2004{
2005 int i;
2006
2007 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2008 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2009 mlxsw_sp_port_hw_tc_stats[i].str, tc);
2010 *p += ETH_GSTRING_LEN;
2011 }
2012}
2013
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002014static void mlxsw_sp_port_get_strings(struct net_device *dev,
2015 u32 stringset, u8 *data)
2016{
2017 u8 *p = data;
2018 int i;
2019
2020 switch (stringset) {
2021 case ETH_SS_STATS:
2022 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2023 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2024 ETH_GSTRING_LEN);
2025 p += ETH_GSTRING_LEN;
2026 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002027
2028 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2029 mlxsw_sp_port_get_prio_strings(&p, i);
2030
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002031 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2032 mlxsw_sp_port_get_tc_strings(&p, i);
2033
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002034 break;
2035 }
2036}
2037
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002038static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2039 enum ethtool_phys_id_state state)
2040{
2041 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2042 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2043 char mlcr_pl[MLXSW_REG_MLCR_LEN];
2044 bool active;
2045
2046 switch (state) {
2047 case ETHTOOL_ID_ACTIVE:
2048 active = true;
2049 break;
2050 case ETHTOOL_ID_INACTIVE:
2051 active = false;
2052 break;
2053 default:
2054 return -EOPNOTSUPP;
2055 }
2056
2057 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2058 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2059}
2060
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002061static int
2062mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2063 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2064{
2065 switch (grp) {
2066 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2067 *p_hw_stats = mlxsw_sp_port_hw_stats;
2068 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2069 break;
2070 case MLXSW_REG_PPCNT_PRIO_CNT:
2071 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2072 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2073 break;
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002074 case MLXSW_REG_PPCNT_TC_CNT:
2075 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2076 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2077 break;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002078 default:
2079 WARN_ON(1);
Yotam Gigie915ac62017-01-09 11:25:48 +01002080 return -EOPNOTSUPP;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002081 }
2082 return 0;
2083}
2084
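/* Read one PPCNT counter group into @data, starting at @data_index,
 * converting cell-based counters to bytes where needed.
 */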
2085static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2086 enum mlxsw_reg_ppcnt_grp grp, int prio,
2087 u64 *data, int data_index)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002088{
Ido Schimmel18281f22017-03-24 08:02:51 +01002089 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2090 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002091 struct mlxsw_sp_port_hw_stats *hw_stats;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002092 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002093 int i, len;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002094 int err;
2095
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002096 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2097 if (err)
2098 return;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002099 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
Ido Schimmel18281f22017-03-24 08:02:51 +01002100 for (i = 0; i < len; i++) {
Colin Ian Kingfaac0ff2016-09-23 12:02:45 +01002101 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
Ido Schimmel18281f22017-03-24 08:02:51 +01002102 if (!hw_stats[i].cells_bytes)
2103 continue;
2104 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2105 data[data_index + i]);
2106 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002107}
2108
2109static void mlxsw_sp_port_get_stats(struct net_device *dev,
2110 struct ethtool_stats *stats, u64 *data)
2111{
2112 int i, data_index = 0;
2113
2114 /* IEEE 802.3 Counters */
2115 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2116 data, data_index);
2117 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2118
2119 /* Per-Priority Counters */
2120 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2121 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2122 data, data_index);
2123 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2124 }
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002125
2126 /* Per-TC Counters */
2127 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2128 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2129 data, data_index);
2130 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2131 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002132}
2133
2134static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2135{
2136 switch (sset) {
2137 case ETH_SS_STATS:
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002138 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002139 default:
2140 return -EOPNOTSUPP;
2141 }
2142}
2143
2144struct mlxsw_sp_port_link_mode {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002145 enum ethtool_link_mode_bit_indices mask_ethtool;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002146 u32 mask;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002147 u32 speed;
2148};
2149
2150static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2151 {
2152 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002153 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2154 .speed = SPEED_100,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002155 },
2156 {
2157 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2158 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002159 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2160 .speed = SPEED_1000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002161 },
2162 {
2163 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002164 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2165 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002166 },
2167 {
2168 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2169 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002170 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2171 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002172 },
2173 {
2174 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2175 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2176 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2177 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002178 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2179 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002180 },
2181 {
2182 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002183 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2184 .speed = SPEED_20000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002185 },
2186 {
2187 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002188 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2189 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002190 },
2191 {
2192 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002193 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2194 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002195 },
2196 {
2197 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002198 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2199 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002200 },
2201 {
2202 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002203 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2204 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002205 },
2206 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002207 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2208 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2209 .speed = SPEED_25000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002210 },
2211 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002212 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2213 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2214 .speed = SPEED_25000,
2215 },
2216 {
2217 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2218 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2219 .speed = SPEED_25000,
2220 },
2226 {
2227 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2228 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2229 .speed = SPEED_50000,
2230 },
2231 {
2232 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2233 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2234 .speed = SPEED_50000,
2235 },
2236 {
2237 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2238 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2239 .speed = SPEED_50000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002240 },
2241 {
2242 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002243 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2244 .speed = SPEED_56000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002245 },
2246 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002247 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2248 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2249 .speed = SPEED_56000,
2250 },
2251 {
2252 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2253 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2254 .speed = SPEED_56000,
2255 },
2256 {
2257 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2258 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2259 .speed = SPEED_56000,
2260 },
2261 {
2262 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2263 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2264 .speed = SPEED_100000,
2265 },
2266 {
2267 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2268 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2269 .speed = SPEED_100000,
2270 },
2271 {
2272 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2273 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2274 .speed = SPEED_100000,
2275 },
2276 {
2277 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2278 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2279 .speed = SPEED_100000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002280 },
2281};
2282
2283#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2284
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002285static void
2286mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2287 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002288{
2289 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2290 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2291 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2292 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2293 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2294 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002295 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002296
2297 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2298 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2299 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2300 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2301 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002302 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002303}
2304
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002305static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002306{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002307 int i;
2308
2309 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2310 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002311 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2312 mode);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002313 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002314}
2315
2316static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002317 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002318{
2319 u32 speed = SPEED_UNKNOWN;
2320 u8 duplex = DUPLEX_UNKNOWN;
2321 int i;
2322
2323 if (!carrier_ok)
2324 goto out;
2325
2326 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2327 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2328 speed = mlxsw_sp_port_link_mode[i].speed;
2329 duplex = DUPLEX_FULL;
2330 break;
2331 }
2332 }
2333out:
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002334 cmd->base.speed = speed;
2335 cmd->base.duplex = duplex;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002336}
2337
2338static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2339{
2340 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2341 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2342 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2343 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2344 return PORT_FIBRE;
2345
2346 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2347 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2348 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2349 return PORT_DA;
2350
2351 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2352 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2353 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2354 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2355 return PORT_NONE;
2356
2357 return PORT_OTHER;
2358}
2359
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002360static u32
2361mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002362{
2363 u32 ptys_proto = 0;
2364 int i;
2365
2366 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002367 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2368 cmd->link_modes.advertising))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002369 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2370 }
2371 return ptys_proto;
2372}
2373
2374static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2375{
2376 u32 ptys_proto = 0;
2377 int i;
2378
2379 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2380 if (speed == mlxsw_sp_port_link_mode[i].speed)
2381 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2382 }
2383 return ptys_proto;
2384}
2385
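/* Build a PTYS mask of all link modes whose speed does not exceed
 * @upper_speed.
 */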
Ido Schimmel18f1e702016-02-26 17:32:31 +01002386static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2387{
2388 u32 ptys_proto = 0;
2389 int i;
2390
2391 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2392 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2393 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2394 }
2395 return ptys_proto;
2396}
2397
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002398static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2399 struct ethtool_link_ksettings *cmd)
2400{
2401 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2402 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2403 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2404
2405 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2406 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2407}
2408
2409static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2410 struct ethtool_link_ksettings *cmd)
2411{
2412 if (!autoneg)
2413 return;
2414
2415 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2416 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2417}
2418
2419static void
2420mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2421 struct ethtool_link_ksettings *cmd)
2422{
2423 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2424 return;
2425
2426 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2427 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2428}
2429
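/* ethtool get_link_ksettings handler: query the PTYS register and
 * translate the capability, admin and operational masks into ethtool
 * link modes, connector type and speed/duplex.
 */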
2430static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2431 struct ethtool_link_ksettings *cmd)
2432{
2433 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2434 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2435 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2436 char ptys_pl[MLXSW_REG_PTYS_LEN];
2437 u8 autoneg_status;
2438 bool autoneg;
2439 int err;
2440
2441 autoneg = mlxsw_sp_port->link.autoneg;
Elad Raz401c8b42016-10-28 21:35:52 +02002442 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002443 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2444 if (err)
2445 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002446 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2447 &eth_proto_oper);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002448
2449 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2450
2451 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2452
2453 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2454 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2455 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2456
2457 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2458 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2459 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2460 cmd);
2461
2462 return 0;
2463}
2464
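/* ethtool set_link_ksettings handler: translate the requested
 * advertisement (or forced speed) into a PTYS admin mask and, if the
 * port is running, toggle its administrative state so the new
 * configuration takes effect.
 */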
2465static int
2466mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2467 const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002468{
2469 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2470 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2471 char ptys_pl[MLXSW_REG_PTYS_LEN];
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002472 u32 eth_proto_cap, eth_proto_new;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002473 bool autoneg;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002474 int err;
2475
Elad Raz401c8b42016-10-28 21:35:52 +02002476 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002477 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002478 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002479 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002480 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002481
2482 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2483 eth_proto_new = autoneg ?
2484 mlxsw_sp_to_ptys_advert_link(cmd) :
2485 mlxsw_sp_to_ptys_speed(cmd->base.speed);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002486
2487 eth_proto_new = eth_proto_new & eth_proto_cap;
2488 if (!eth_proto_new) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002489 netdev_err(dev, "No supported speed requested\n");
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002490 return -EINVAL;
2491 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002492
Elad Raz401c8b42016-10-28 21:35:52 +02002493 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2494 eth_proto_new);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002495 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002496 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002497 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002498
Ido Schimmel6277d462016-07-15 11:14:58 +02002499 if (!netif_running(dev))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002500 return 0;
2501
Ido Schimmel0c83f882016-09-12 13:26:23 +02002502 mlxsw_sp_port->link.autoneg = autoneg;
2503
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002504 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2505 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002506
2507 return 0;
2508}
2509
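/* Flash the device firmware. The RTNL lock is dropped for the duration
 * of the firmware request and flash, with a reference held on the
 * netdev to keep it alive.
 */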
Yotam Gigice6ef68f2017-06-01 16:26:46 +03002510static int mlxsw_sp_flash_device(struct net_device *dev,
2511 struct ethtool_flash *flash)
2512{
2513 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2514 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2515 const struct firmware *firmware;
2516 int err;
2517
2518 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2519 return -EOPNOTSUPP;
2520
2521 dev_hold(dev);
2522 rtnl_unlock();
2523
2524 err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2525 if (err)
2526 goto out;
2527 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2528 release_firmware(firmware);
2529out:
2530 rtnl_lock();
2531 dev_put(dev);
2532 return err;
2533}
2534
Arkadi Sharshevsky2ea10902017-06-14 09:27:40 +02002535#define MLXSW_SP_QSFP_I2C_ADDR 0x50
2536
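/* Read up to MLXSW_SP_REG_MCIA_EEPROM_SIZE bytes of the module EEPROM
 * through the MCIA register and report how many bytes were actually
 * read.
 */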
2537static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2538 u16 offset, u16 size, void *data,
2539 unsigned int *p_read_size)
2540{
2541 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2542 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
2543 char mcia_pl[MLXSW_REG_MCIA_LEN];
2544 int status;
2545 int err;
2546
2547 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
2548 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
2549 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR);
2550
2551 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
2552 if (err)
2553 return err;
2554
2555 status = mlxsw_reg_mcia_status_get(mcia_pl);
2556 if (status)
2557 return -EIO;
2558
2559 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
2560 memcpy(data, eeprom_tmp, size);
2561 *p_read_size = size;
2562
2563 return 0;
2564}
2565
2566enum mlxsw_sp_eeprom_module_info_rev_id {
2567 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
2568 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
2569 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
2570};
2571
2572enum mlxsw_sp_eeprom_module_info_id {
2573 MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
2574 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
2575 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
2576 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
2577};
2578
2579enum mlxsw_sp_eeprom_module_info {
2580 MLXSW_SP_EEPROM_MODULE_INFO_ID,
2581 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
2582 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2583};
2584
2585static int mlxsw_sp_get_module_info(struct net_device *netdev,
2586 struct ethtool_modinfo *modinfo)
2587{
2588 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2589 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
2590 u8 module_rev_id, module_id;
2591 unsigned int read_size;
2592 int err;
2593
2594 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
2595 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2596 module_info, &read_size);
2597 if (err)
2598 return err;
2599
2600 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
2601 return -EIO;
2602
2603 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
2604 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
2605
2606 switch (module_id) {
2607 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
2608 modinfo->type = ETH_MODULE_SFF_8436;
2609 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2610 break;
2611 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
2612 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
2613 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
2614 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
2615 modinfo->type = ETH_MODULE_SFF_8636;
2616 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2617 } else {
2618 modinfo->type = ETH_MODULE_SFF_8436;
2619 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2620 }
2621 break;
2622 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
2623 modinfo->type = ETH_MODULE_SFF_8472;
2624 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2625 break;
2626 default:
2627 return -EINVAL;
2628 }
2629
2630 return 0;
2631}
2632
2633static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
2634 struct ethtool_eeprom *ee,
2635 u8 *data)
2636{
2637 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2638 int offset = ee->offset;
2639 unsigned int read_size;
2640 int i = 0;
2641 int err;
2642
2643 if (!ee->len)
2644 return -EINVAL;
2645
2646 memset(data, 0, ee->len);
2647
2648 while (i < ee->len) {
2649 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
2650 ee->len - i, data + i,
2651 &read_size);
2652 if (err) {
2653 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
2654 return err;
2655 }
2656
2657 i += read_size;
2658 offset += read_size;
2659 }
2660
2661 return 0;
2662}
2663
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002664static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2665 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2666 .get_link = ethtool_op_get_link,
Ido Schimmel9f7ec052016-04-06 17:10:14 +02002667 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2668 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002669 .get_strings = mlxsw_sp_port_get_strings,
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002670 .set_phys_id = mlxsw_sp_port_set_phys_id,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002671 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2672 .get_sset_count = mlxsw_sp_port_get_sset_count,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002673 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2674 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
Yotam Gigice6ef68f2017-06-01 16:26:46 +03002675 .flash_device = mlxsw_sp_flash_device,
Arkadi Sharshevsky2ea10902017-06-14 09:27:40 +02002676 .get_module_info = mlxsw_sp_get_module_info,
2677 .get_module_eeprom = mlxsw_sp_get_module_eeprom,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002678};
2679
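/* Advertise every supported speed up to MLXSW_SP_PORT_BASE_SPEED
 * multiplied by the port width (number of lanes).
 */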
Ido Schimmel18f1e702016-02-26 17:32:31 +01002680static int
2681mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2682{
2683 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2684 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2685 char ptys_pl[MLXSW_REG_PTYS_LEN];
2686 u32 eth_proto_admin;
2687
2688 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
Elad Raz401c8b42016-10-28 21:35:52 +02002689 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2690 eth_proto_admin);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002691 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2692}
2693
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002694int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2695 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2696 bool dwrr, u8 dwrr_weight)
Ido Schimmel90183b92016-04-06 17:10:08 +02002697{
2698 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2699 char qeec_pl[MLXSW_REG_QEEC_LEN];
2700
2701 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2702 next_index);
2703 mlxsw_reg_qeec_de_set(qeec_pl, true);
2704 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2705 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2706 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2707}
2708
Ido Schimmelcc7cf512016-04-06 17:10:11 +02002709int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2710 enum mlxsw_reg_qeec_hr hr, u8 index,
2711 u8 next_index, u32 maxrate)
Ido Schimmel90183b92016-04-06 17:10:08 +02002712{
2713 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2714 char qeec_pl[MLXSW_REG_QEEC_LEN];
2715
2716 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2717 next_index);
2718 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2719 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2720 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2721}
2722
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002723int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2724 u8 switch_prio, u8 tclass)
Ido Schimmel90183b92016-04-06 17:10:08 +02002725{
2726 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2727 char qtct_pl[MLXSW_REG_QTCT_LEN];
2728
2729 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2730 tclass);
2731 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2732}
2733
2734static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2735{
2736 int err, i;
2737
 2738	/* Set up the elements hierarchy, so that each TC is linked to
 2739	 * one subgroup, and all subgroups are members of the same group.
2740 */
2741 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2742 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2743 0);
2744 if (err)
2745 return err;
2746 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2747 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2748 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2749 0, false, 0);
2750 if (err)
2751 return err;
2752 }
2753 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2754 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2755 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2756 false, 0);
2757 if (err)
2758 return err;
2759 }
2760
 2761	/* Make sure the max shaper is disabled in all hierarchies that
2762 * support it.
2763 */
2764 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2765 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2766 MLXSW_REG_QEEC_MAS_DIS);
2767 if (err)
2768 return err;
2769 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2770 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2771 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2772 i, 0,
2773 MLXSW_REG_QEEC_MAS_DIS);
2774 if (err)
2775 return err;
2776 }
2777 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2778 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2779 MLXSW_REG_QEEC_HIERARCY_TC,
2780 i, i,
2781 MLXSW_REG_QEEC_MAS_DIS);
2782 if (err)
2783 return err;
2784 }
2785
2786 /* Map all priorities to traffic class 0. */
2787 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2788 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2789 if (err)
2790 return err;
2791 }
2792
2793 return 0;
2794}
2795
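/* Create and register the netdev backing a single front panel port:
 * map its module and lanes, initialize buffers, ETS, DCB and FIDs, and
 * add the default port VLAN (VID 1) before registration.
 */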
Ido Schimmel5b153852017-06-08 08:47:44 +02002796static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2797 bool split, u8 module, u8 width, u8 lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002798{
Ido Schimmelc57529e2017-05-26 08:37:31 +02002799 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002800 struct mlxsw_sp_port *mlxsw_sp_port;
2801 struct net_device *dev;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002802 int err;
2803
Ido Schimmel5b153852017-06-08 08:47:44 +02002804 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2805 if (err) {
2806 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2807 local_port);
2808 return err;
2809 }
2810
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002811 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
Ido Schimmel5b153852017-06-08 08:47:44 +02002812 if (!dev) {
2813 err = -ENOMEM;
2814 goto err_alloc_etherdev;
2815 }
Jiri Pirkof20a91f2016-10-27 15:13:00 +02002816 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002817 mlxsw_sp_port = netdev_priv(dev);
2818 mlxsw_sp_port->dev = dev;
2819 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2820 mlxsw_sp_port->local_port = local_port;
Ido Schimmelc57529e2017-05-26 08:37:31 +02002821 mlxsw_sp_port->pvid = 1;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002822 mlxsw_sp_port->split = split;
Ido Schimmeld664b412016-06-09 09:51:40 +02002823 mlxsw_sp_port->mapping.module = module;
2824 mlxsw_sp_port->mapping.width = width;
2825 mlxsw_sp_port->mapping.lane = lane;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002826 mlxsw_sp_port->link.autoneg = 1;
Ido Schimmel31a08a52017-05-26 08:37:26 +02002827 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02002828 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002829
2830 mlxsw_sp_port->pcpu_stats =
2831 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2832 if (!mlxsw_sp_port->pcpu_stats) {
2833 err = -ENOMEM;
2834 goto err_alloc_stats;
2835 }
2836
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002837 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2838 GFP_KERNEL);
2839 if (!mlxsw_sp_port->sample) {
2840 err = -ENOMEM;
2841 goto err_alloc_sample;
2842 }
2843
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002844 mlxsw_sp_port->hw_stats.cache =
2845 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2846
2847 if (!mlxsw_sp_port->hw_stats.cache) {
2848 err = -ENOMEM;
2849 goto err_alloc_hw_stats;
2850 }
2851 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2852 &update_stats_cache);
2853
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002854 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2855 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2856
Ido Schimmel2e915e02017-06-08 08:47:45 +02002857 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
Ido Schimmel5b153852017-06-08 08:47:44 +02002858 if (err) {
2859 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2860 mlxsw_sp_port->local_port);
2861 goto err_port_module_map;
2862 }
2863
Ido Schimmel3247ff22016-09-08 08:16:02 +02002864 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2865 if (err) {
2866 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2867 mlxsw_sp_port->local_port);
2868 goto err_port_swid_set;
2869 }
2870
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002871 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2872 if (err) {
2873 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2874 mlxsw_sp_port->local_port);
2875 goto err_dev_addr_init;
2876 }
2877
2878 netif_carrier_off(dev);
2879
2880 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
Yotam Gigi763b4b72016-07-21 12:03:17 +02002881 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2882 dev->hw_features |= NETIF_F_HW_TC;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002883
Jarod Wilsond894be52016-10-20 13:55:16 -04002884 dev->min_mtu = 0;
2885 dev->max_mtu = ETH_MAX_MTU;
2886
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002887	/* Each packet needs to have a Tx header (metadata) on top of all other
2888 * headers.
2889 */
Yotam Gigifeb7d382016-10-04 09:46:04 +02002890 dev->needed_headroom = MLXSW_TXHDR_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002891
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002892 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2893 if (err) {
2894 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2895 mlxsw_sp_port->local_port);
2896 goto err_port_system_port_mapping_set;
2897 }
2898
Ido Schimmel18f1e702016-02-26 17:32:31 +01002899 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2900 if (err) {
2901 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2902 mlxsw_sp_port->local_port);
2903 goto err_port_speed_by_width_set;
2904 }
2905
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002906 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2907 if (err) {
2908 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2909 mlxsw_sp_port->local_port);
2910 goto err_port_mtu_set;
2911 }
2912
2913 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2914 if (err)
2915 goto err_port_admin_status_set;
2916
2917 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2918 if (err) {
2919 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2920 mlxsw_sp_port->local_port);
2921 goto err_port_buffers_init;
2922 }
2923
Ido Schimmel90183b92016-04-06 17:10:08 +02002924 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2925 if (err) {
2926 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2927 mlxsw_sp_port->local_port);
2928 goto err_port_ets_init;
2929 }
2930
Ido Schimmelf00817d2016-04-06 17:10:09 +02002931 /* ETS and buffers must be initialized before DCB. */
2932 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2933 if (err) {
2934 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2935 mlxsw_sp_port->local_port);
2936 goto err_port_dcb_init;
2937 }
2938
Ido Schimmela1107482017-05-26 08:37:39 +02002939 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
Ido Schimmel45a4a162017-05-16 19:38:35 +02002940 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02002941 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
Ido Schimmel45a4a162017-05-16 19:38:35 +02002942 mlxsw_sp_port->local_port);
Ido Schimmela1107482017-05-26 08:37:39 +02002943 goto err_port_fids_init;
Ido Schimmel45a4a162017-05-16 19:38:35 +02002944 }
2945
Ido Schimmelc57529e2017-05-26 08:37:31 +02002946 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2947 if (IS_ERR(mlxsw_sp_port_vlan)) {
2948 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
Ido Schimmel05978482016-08-17 16:39:30 +02002949 mlxsw_sp_port->local_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02002950 goto err_port_vlan_get;
Ido Schimmel05978482016-08-17 16:39:30 +02002951 }
2952
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002953 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
Ido Schimmel2f258442016-08-17 16:39:31 +02002954 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002955 err = register_netdev(dev);
2956 if (err) {
2957 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2958 mlxsw_sp_port->local_port);
2959 goto err_register_netdev;
2960 }
2961
Elad Razd808c7e2016-10-28 21:35:57 +02002962 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2963 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2964 module);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002965 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002966 return 0;
2967
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002968err_register_netdev:
Ido Schimmel2f258442016-08-17 16:39:31 +02002969 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002970 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02002971 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2972err_port_vlan_get:
Ido Schimmela1107482017-05-26 08:37:39 +02002973 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
2974err_port_fids_init:
Ido Schimmel4de34eb2016-08-04 17:36:22 +03002975 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002976err_port_dcb_init:
Ido Schimmel90183b92016-04-06 17:10:08 +02002977err_port_ets_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002978err_port_buffers_init:
2979err_port_admin_status_set:
2980err_port_mtu_set:
Ido Schimmel18f1e702016-02-26 17:32:31 +01002981err_port_speed_by_width_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002982err_port_system_port_mapping_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002983err_dev_addr_init:
Ido Schimmel3247ff22016-09-08 08:16:02 +02002984 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2985err_port_swid_set:
Ido Schimmel2e915e02017-06-08 08:47:45 +02002986 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
Ido Schimmel5b153852017-06-08 08:47:44 +02002987err_port_module_map:
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002988 kfree(mlxsw_sp_port->hw_stats.cache);
2989err_alloc_hw_stats:
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002990 kfree(mlxsw_sp_port->sample);
2991err_alloc_sample:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002992 free_percpu(mlxsw_sp_port->pcpu_stats);
2993err_alloc_stats:
2994 free_netdev(dev);
Ido Schimmel5b153852017-06-08 08:47:44 +02002995err_alloc_etherdev:
Jiri Pirko67963a32016-10-28 21:35:55 +02002996 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2997 return err;
2998}
2999
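/* Teardown mirrors mlxsw_sp_port_create() in reverse: stop the stats work
 * and unregister the netdev (which stops the port), then release switchdev
 * state, VLANs, FIDs, DCB, the SWID and the module mapping before freeing
 * the netdev itself.
 */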
Ido Schimmel5b153852017-06-08 08:47:44 +02003000static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003001{
3002 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3003
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02003004 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
Jiri Pirko67963a32016-10-28 21:35:55 +02003005 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003006 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
Ido Schimmel2f258442016-08-17 16:39:31 +02003007 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02003008 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02003009 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
Ido Schimmela1107482017-05-26 08:37:39 +02003010 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02003011 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmel3e9b27b2016-02-26 17:32:28 +01003012 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
Ido Schimmel2e915e02017-06-08 08:47:45 +02003013 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02003014 kfree(mlxsw_sp_port->hw_stats.cache);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003015 kfree(mlxsw_sp_port->sample);
Yotam Gigi136f1442017-01-09 11:25:47 +01003016 free_percpu(mlxsw_sp_port->pcpu_stats);
Ido Schimmel31a08a52017-05-26 08:37:26 +02003017 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003018 free_netdev(mlxsw_sp_port->dev);
Jiri Pirko67963a32016-10-28 21:35:55 +02003019 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3020}
3021
Jiri Pirkof83e2102016-10-28 21:35:49 +02003022static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3023{
3024 return mlxsw_sp->ports[local_port] != NULL;
3025}
3026
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003027static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3028{
3029 int i;
3030
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003031 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003032 if (mlxsw_sp_port_created(mlxsw_sp, i))
3033 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003034 kfree(mlxsw_sp->port_to_module);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003035 kfree(mlxsw_sp->ports);
3036}
3037
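/* Walk all possible local ports, query the module/width/lane mapping of
 * each one and instantiate only the ports that have a module mapped
 * (width != 0). The module is cached in port_to_module[] so it can be
 * reused when ports are re-created after an unsplit.
 */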
3038static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3039{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003040 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
Ido Schimmeld664b412016-06-09 09:51:40 +02003041 u8 module, width, lane;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003042 size_t alloc_size;
3043 int i;
3044 int err;
3045
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003046 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003047 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3048 if (!mlxsw_sp->ports)
3049 return -ENOMEM;
3050
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003051 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
3052 if (!mlxsw_sp->port_to_module) {
3053 err = -ENOMEM;
3054 goto err_port_to_module_alloc;
3055 }
3056
3057 for (i = 1; i < max_ports; i++) {
Ido Schimmel558c2d52016-02-26 17:32:29 +01003058 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
Ido Schimmeld664b412016-06-09 09:51:40 +02003059 &width, &lane);
Ido Schimmel558c2d52016-02-26 17:32:29 +01003060 if (err)
3061 goto err_port_module_info_get;
3062 if (!width)
3063 continue;
3064 mlxsw_sp->port_to_module[i] = module;
Jiri Pirko67963a32016-10-28 21:35:55 +02003065 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3066 module, width, lane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003067 if (err)
3068 goto err_port_create;
3069 }
3070 return 0;
3071
3072err_port_create:
Ido Schimmel558c2d52016-02-26 17:32:29 +01003073err_port_module_info_get:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003074 for (i--; i >= 1; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003075 if (mlxsw_sp_port_created(mlxsw_sp, i))
3076 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003077 kfree(mlxsw_sp->port_to_module);
3078err_port_to_module_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003079 kfree(mlxsw_sp->ports);
3080 return err;
3081}
3082
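/* Return the first local port of the cluster that contains local_port.
 * For example, assuming MLXSW_SP_PORTS_PER_CLUSTER_MAX is 4, local ports
 * 5, 6, 7 and 8 would all map to base port 5.
 */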
Ido Schimmel18f1e702016-02-26 17:32:31 +01003083static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3084{
3085 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3086
3087 return local_port - offset;
3088}
3089
Ido Schimmelbe945352016-06-09 09:51:39 +02003090static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3091 u8 module, unsigned int count)
3092{
3093 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3094 int err, i;
3095
3096 for (i = 0; i < count; i++) {
Ido Schimmelbe945352016-06-09 09:51:39 +02003097 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
Ido Schimmeld664b412016-06-09 09:51:40 +02003098 module, width, i * width);
Ido Schimmelbe945352016-06-09 09:51:39 +02003099 if (err)
3100 goto err_port_create;
3101 }
3102
3103 return 0;
3104
3105err_port_create:
3106 for (i--; i >= 0; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003107 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3108 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmelbe945352016-06-09 09:51:39 +02003109 return err;
3110}
3111
3112static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3113 u8 base_port, unsigned int count)
3114{
3115 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3116 int i;
3117
 3118	/* A port split by four means we need to re-create two unsplit ports;
 3119	 * a split by two means only one.
 3120	 */
3121 count = count / 2;
3122
3123 for (i = 0; i < count; i++) {
3124 local_port = base_port + i * 2;
3125 module = mlxsw_sp->port_to_module[local_port];
3126
Ido Schimmelbe945352016-06-09 09:51:39 +02003127 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
Ido Schimmeld664b412016-06-09 09:51:40 +02003128 width, 0);
Ido Schimmelbe945352016-06-09 09:51:39 +02003129 }
3130}
3131
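/* Handler for the driver's .port_split operation: the port must currently
 * use the full module width and may only be split into two or four. The
 * sibling (even) ports must be free; the existing ports in the cluster are
 * removed and the split ports are created at the reduced width. On failure
 * the original unsplit ports are re-created.
 */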
Jiri Pirkob2f10572016-04-08 19:11:23 +02003132static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3133 unsigned int count)
Ido Schimmel18f1e702016-02-26 17:32:31 +01003134{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003135 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003136 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003137 u8 module, cur_width, base_port;
3138 int i;
3139 int err;
3140
3141 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3142 if (!mlxsw_sp_port) {
3143 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3144 local_port);
3145 return -EINVAL;
3146 }
3147
Ido Schimmeld664b412016-06-09 09:51:40 +02003148 module = mlxsw_sp_port->mapping.module;
3149 cur_width = mlxsw_sp_port->mapping.width;
3150
Ido Schimmel18f1e702016-02-26 17:32:31 +01003151 if (count != 2 && count != 4) {
3152 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3153 return -EINVAL;
3154 }
3155
Ido Schimmel18f1e702016-02-26 17:32:31 +01003156 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3157 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3158 return -EINVAL;
3159 }
3160
3161 /* Make sure we have enough slave (even) ports for the split. */
3162 if (count == 2) {
3163 base_port = local_port;
3164 if (mlxsw_sp->ports[base_port + 1]) {
3165 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3166 return -EINVAL;
3167 }
3168 } else {
3169 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3170 if (mlxsw_sp->ports[base_port + 1] ||
3171 mlxsw_sp->ports[base_port + 3]) {
3172 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3173 return -EINVAL;
3174 }
3175 }
3176
3177 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003178 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3179 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003180
Ido Schimmelbe945352016-06-09 09:51:39 +02003181 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
3182 if (err) {
3183 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3184 goto err_port_split_create;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003185 }
3186
3187 return 0;
3188
Ido Schimmelbe945352016-06-09 09:51:39 +02003189err_port_split_create:
3190 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003191 return err;
3192}
3193
Jiri Pirkob2f10572016-04-08 19:11:23 +02003194static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
Ido Schimmel18f1e702016-02-26 17:32:31 +01003195{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003196 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003197 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmeld664b412016-06-09 09:51:40 +02003198 u8 cur_width, base_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003199 unsigned int count;
3200 int i;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003201
3202 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3203 if (!mlxsw_sp_port) {
3204 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3205 local_port);
3206 return -EINVAL;
3207 }
3208
3209 if (!mlxsw_sp_port->split) {
3210 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
3211 return -EINVAL;
3212 }
3213
Ido Schimmeld664b412016-06-09 09:51:40 +02003214 cur_width = mlxsw_sp_port->mapping.width;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003215 count = cur_width == 1 ? 4 : 2;
3216
3217 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3218
3219 /* Determine which ports to remove. */
3220 if (count == 2 && local_port >= base_port + 2)
3221 base_port = base_port + 2;
3222
3223 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003224 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3225 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003226
Ido Schimmelbe945352016-06-09 09:51:39 +02003227 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003228
3229 return 0;
3230}
3231
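/* PUDE (port up/down event): reflect the operational status reported by
 * the device into the netdev carrier state.
 */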
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003232static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3233 char *pude_pl, void *priv)
3234{
3235 struct mlxsw_sp *mlxsw_sp = priv;
3236 struct mlxsw_sp_port *mlxsw_sp_port;
3237 enum mlxsw_reg_pude_oper_status status;
3238 u8 local_port;
3239
3240 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3241 mlxsw_sp_port = mlxsw_sp->ports[local_port];
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003242 if (!mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003243 return;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003244
3245 status = mlxsw_reg_pude_oper_status_get(pude_pl);
3246 if (status == MLXSW_PORT_OPER_STATUS_UP) {
3247 netdev_info(mlxsw_sp_port->dev, "link up\n");
3248 netif_carrier_on(mlxsw_sp_port->dev);
3249 } else {
3250 netdev_info(mlxsw_sp_port->dev, "link down\n");
3251 netif_carrier_off(mlxsw_sp_port->dev);
3252 }
3253}
3254
Nogah Frankel14eeda92016-11-25 10:33:32 +01003255static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3256 u8 local_port, void *priv)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003257{
3258 struct mlxsw_sp *mlxsw_sp = priv;
3259 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3260 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3261
3262 if (unlikely(!mlxsw_sp_port)) {
3263 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3264 local_port);
3265 return;
3266 }
3267
3268 skb->dev = mlxsw_sp_port->dev;
3269
3270 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3271 u64_stats_update_begin(&pcpu_stats->syncp);
3272 pcpu_stats->rx_packets++;
3273 pcpu_stats->rx_bytes += skb->len;
3274 u64_stats_update_end(&pcpu_stats->syncp);
3275
3276 skb->protocol = eth_type_trans(skb, skb->dev);
3277 netif_receive_skb(skb);
3278}
3279
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02003280static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3281 void *priv)
3282{
3283 skb->offload_fwd_mark = 1;
Nogah Frankel14eeda92016-11-25 10:33:32 +01003284 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02003285}
3286
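/* Packets trapped for sampling: truncate to the configured size if
 * requested and hand them to the psample group under RCU. The skb is
 * always consumed here and never injected into the stack.
 */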
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003287static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3288 void *priv)
3289{
3290 struct mlxsw_sp *mlxsw_sp = priv;
3291 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3292 struct psample_group *psample_group;
3293 u32 size;
3294
3295 if (unlikely(!mlxsw_sp_port)) {
3296 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3297 local_port);
3298 goto out;
3299 }
3300 if (unlikely(!mlxsw_sp_port->sample)) {
3301 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3302 local_port);
3303 goto out;
3304 }
3305
3306 size = mlxsw_sp_port->sample->truncate ?
3307 mlxsw_sp_port->sample->trunc_size : skb->len;
3308
3309 rcu_read_lock();
3310 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
3311 if (!psample_group)
3312 goto out_unlock;
3313 psample_sample_packet(psample_group, skb, size,
3314 mlxsw_sp_port->dev->ifindex, 0,
3315 mlxsw_sp_port->sample->rate);
3316out_unlock:
3317 rcu_read_unlock();
3318out:
3319 consume_skb(skb);
3320}
3321
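/* Listener helpers: MLXSW_SP_RXL_MARK() differs from MLXSW_SP_RXL_NO_MARK()
 * only in that the received skb gets offload_fwd_mark set, telling the
 * bridge that the ASIC already forwarded the packet so software should not
 * forward it again. MLXSW_SP_EVENTL() registers an event (not a packet)
 * listener.
 */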
Nogah Frankel117b0da2016-11-25 10:33:44 +01003322#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel0fb78a42016-11-25 10:33:39 +01003323 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01003324 _is_ctrl, SP_##_trap_group, DISCARD)
Ido Schimmel93393b32016-08-25 18:42:38 +02003325
Nogah Frankel117b0da2016-11-25 10:33:44 +01003326#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel14eeda92016-11-25 10:33:32 +01003327 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01003328 _is_ctrl, SP_##_trap_group, DISCARD)
3329
3330#define MLXSW_SP_EVENTL(_func, _trap_id) \
3331 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
Nogah Frankel14eeda92016-11-25 10:33:32 +01003332
Nogah Frankel45449132016-11-25 10:33:35 +01003333static const struct mlxsw_listener mlxsw_sp_listener[] = {
3334 /* Events */
Nogah Frankel117b0da2016-11-25 10:33:44 +01003335 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
Nogah Frankelee4a60d2016-11-25 10:33:29 +01003336 /* L2 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01003337 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
3338 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
3339 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
3340 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
3341 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
3342 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
3343 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
3344 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
3345 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
3346 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
3347 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
Jiri Pirko9d41acc2017-04-18 16:55:38 +02003348 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
Arkadi Sharshevsky588823f2017-07-17 14:15:31 +02003349 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
3350 false),
3351 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3352 false),
3353 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
3354 false),
3355 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3356 false),
Ido Schimmel93393b32016-08-25 18:42:38 +02003357 /* L3 traps */
Ido Schimmel0fcc4842017-07-17 14:15:29 +02003358 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3359 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3360 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
Ido Schimmel0fcc4842017-07-17 14:15:29 +02003361 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003362 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
3363 false),
3364 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
3365 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
3366 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
3367 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
3368 false),
3369 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
3370 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
3371 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
Ido Schimmel0fcc4842017-07-17 14:15:29 +02003372 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003373 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
3374 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
3375 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3376 false),
3377 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3378 false),
3379 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3380 false),
3381 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3382 false),
3383 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
3384 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
3385 false),
3386 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
3387 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
Ido Schimmel7607dd32017-07-17 14:15:30 +02003388 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003389 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003390 /* PKT Sample trap */
3391 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
Jiri Pirko0db7b382017-06-06 14:12:05 +02003392 false, SP_IP2ME, DISCARD),
3393 /* ACL trap */
3394 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003395};
3396
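/* Rate-limit traffic trapped to the CPU: each trap group is bound to a
 * policer with a group-specific rate and burst size. Most groups are
 * policed in packets; IP2ME is policed in bytes. The groups themselves are
 * mapped to priorities and traffic classes in mlxsw_sp_trap_groups_set().
 */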
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003397static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3398{
3399 char qpcr_pl[MLXSW_REG_QPCR_LEN];
3400 enum mlxsw_reg_qpcr_ir_units ir_units;
3401 int max_cpu_policers;
3402 bool is_bytes;
3403 u8 burst_size;
3404 u32 rate;
3405 int i, err;
3406
3407 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3408 return -EIO;
3409
3410 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3411
3412 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3413 for (i = 0; i < max_cpu_policers; i++) {
3414 is_bytes = false;
3415 switch (i) {
3416 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3417 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3418 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3419 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3420 rate = 128;
3421 burst_size = 7;
3422 break;
3423 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
Arkadi Sharshevsky588823f2017-07-17 14:15:31 +02003424 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003425 rate = 16 * 1024;
3426 burst_size = 10;
3427 break;
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003428 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003429 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3430 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003431 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003432 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3433 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003434 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003435 rate = 1024;
3436 burst_size = 7;
3437 break;
3438 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3439 is_bytes = true;
3440 rate = 4 * 1024;
3441 burst_size = 4;
3442 break;
3443 default:
3444 continue;
3445 }
3446
3447 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3448 burst_size);
3449 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3450 if (err)
3451 return err;
3452 }
3453
3454 return 0;
3455}
3456
Nogah Frankel579c82e2016-11-25 10:33:42 +01003457static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003458{
3459 char htgt_pl[MLXSW_REG_HTGT_LEN];
Nogah Frankel117b0da2016-11-25 10:33:44 +01003460 enum mlxsw_reg_htgt_trap_group i;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003461 int max_cpu_policers;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003462 int max_trap_groups;
3463 u8 priority, tc;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003464 u16 policer_id;
Nogah Frankel117b0da2016-11-25 10:33:44 +01003465 int err;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003466
3467 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3468 return -EIO;
3469
3470 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003471 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003472
3473 for (i = 0; i < max_trap_groups; i++) {
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003474 policer_id = i;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003475 switch (i) {
Nogah Frankel117b0da2016-11-25 10:33:44 +01003476 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3477 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3478 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3479 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3480 priority = 5;
3481 tc = 5;
3482 break;
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003483 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003484 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3485 priority = 4;
3486 tc = 4;
3487 break;
3488 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3489 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
Arkadi Sharshevsky588823f2017-07-17 14:15:31 +02003490 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003491 priority = 3;
3492 tc = 3;
3493 break;
3494 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003495 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003496 priority = 2;
3497 tc = 2;
3498 break;
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003499 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003500 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3501 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3502 priority = 1;
3503 tc = 1;
3504 break;
3505 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
Nogah Frankel579c82e2016-11-25 10:33:42 +01003506 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3507 tc = MLXSW_REG_HTGT_DEFAULT_TC;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003508 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003509 break;
3510 default:
3511 continue;
3512 }
Nogah Frankel117b0da2016-11-25 10:33:44 +01003513
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003514 if (max_cpu_policers <= policer_id &&
3515 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3516 return -EIO;
3517
3518 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003519 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3520 if (err)
3521 return err;
3522 }
3523
3524 return 0;
3525}
3526
3527static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3528{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003529 int i;
3530 int err;
3531
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003532 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3533 if (err)
3534 return err;
3535
Nogah Frankel579c82e2016-11-25 10:33:42 +01003536 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003537 if (err)
3538 return err;
3539
Nogah Frankel45449132016-11-25 10:33:35 +01003540 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003541 err = mlxsw_core_trap_register(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003542 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003543 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003544 if (err)
Nogah Frankel45449132016-11-25 10:33:35 +01003545 goto err_listener_register;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003546
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003547 }
3548 return 0;
3549
Nogah Frankel45449132016-11-25 10:33:35 +01003550err_listener_register:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003551 for (i--; i >= 0; i--) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003552 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003553 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003554 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003555 }
3556 return err;
3557}
3558
3559static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3560{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003561 int i;
3562
Nogah Frankel45449132016-11-25 10:33:35 +01003563 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003564 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003565 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003566 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003567 }
3568}
3569
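/* Configure the LAG hash (SLCR) over the MAC, ethertype, VLAN, IP and L4
 * port fields and allocate the per-LAG tracking array according to the
 * MAX_LAG resource reported by the device.
 */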
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003570static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3571{
3572 char slcr_pl[MLXSW_REG_SLCR_LEN];
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003573 int err;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003574
3575 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3576 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3577 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3578 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3579 MLXSW_REG_SLCR_LAG_HASH_SIP |
3580 MLXSW_REG_SLCR_LAG_HASH_DIP |
3581 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3582 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3583 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003584 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3585 if (err)
3586 return err;
3587
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003588 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3589 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003590 return -EIO;
3591
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003592 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003593 sizeof(struct mlxsw_sp_upper),
3594 GFP_KERNEL);
3595 if (!mlxsw_sp->lags)
3596 return -ENOMEM;
3597
3598 return 0;
3599}
3600
3601static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3602{
3603 kfree(mlxsw_sp->lags);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003604}
3605
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003606static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3607{
3608 char htgt_pl[MLXSW_REG_HTGT_LEN];
3609
Nogah Frankel579c82e2016-11-25 10:33:42 +01003610 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3611 MLXSW_REG_HTGT_INVALID_POLICER,
3612 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3613 MLXSW_REG_HTGT_DEFAULT_TC);
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003614 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3615}
3616
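/* Main init flow: validate (and possibly upgrade) the firmware revision,
 * read the base MAC, then bring up the subsystems in dependency order
 * (FIDs, traps, shared buffers, LAG, switchdev, router, SPAN, ACLs,
 * counter pool, dpipe) before finally creating the ports.
 * mlxsw_sp_fini() tears everything down in reverse.
 */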
Jiri Pirkob2f10572016-04-08 19:11:23 +02003617static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003618 const struct mlxsw_bus_info *mlxsw_bus_info)
3619{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003620 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003621 int err;
3622
3623 mlxsw_sp->core = mlxsw_core;
3624 mlxsw_sp->bus_info = mlxsw_bus_info;
3625
Yotam Gigi6b742192017-05-23 21:56:29 +02003626 err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
3627 if (err) {
3628 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
3629 return err;
3630 }
3631
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003632 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3633 if (err) {
3634 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3635 return err;
3636 }
3637
Ido Schimmela1107482017-05-26 08:37:39 +02003638 err = mlxsw_sp_fids_init(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003639 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02003640 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
Nogah Frankel45449132016-11-25 10:33:35 +01003641 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003642 }
3643
Ido Schimmela1107482017-05-26 08:37:39 +02003644 err = mlxsw_sp_traps_init(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003645 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02003646 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3647 goto err_traps_init;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003648 }
3649
3650 err = mlxsw_sp_buffers_init(mlxsw_sp);
3651 if (err) {
3652 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3653 goto err_buffers_init;
3654 }
3655
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003656 err = mlxsw_sp_lag_init(mlxsw_sp);
3657 if (err) {
3658 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3659 goto err_lag_init;
3660 }
3661
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003662 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3663 if (err) {
3664 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3665 goto err_switchdev_init;
3666 }
3667
Ido Schimmel464dce12016-07-02 11:00:15 +02003668 err = mlxsw_sp_router_init(mlxsw_sp);
3669 if (err) {
3670 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3671 goto err_router_init;
3672 }
3673
Yotam Gigi763b4b72016-07-21 12:03:17 +02003674 err = mlxsw_sp_span_init(mlxsw_sp);
3675 if (err) {
3676 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3677 goto err_span_init;
3678 }
3679
Jiri Pirko22a67762017-02-03 10:29:07 +01003680 err = mlxsw_sp_acl_init(mlxsw_sp);
3681 if (err) {
3682 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3683 goto err_acl_init;
3684 }
3685
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003686 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3687 if (err) {
3688 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3689 goto err_counter_pool_init;
3690 }
3691
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003692 err = mlxsw_sp_dpipe_init(mlxsw_sp);
3693 if (err) {
3694 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3695 goto err_dpipe_init;
3696 }
3697
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003698 err = mlxsw_sp_ports_create(mlxsw_sp);
3699 if (err) {
3700 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3701 goto err_ports_create;
3702 }
3703
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003704 return 0;
3705
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003706err_ports_create:
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003707 mlxsw_sp_dpipe_fini(mlxsw_sp);
3708err_dpipe_init:
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003709 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3710err_counter_pool_init:
Jiri Pirko22a67762017-02-03 10:29:07 +01003711 mlxsw_sp_acl_fini(mlxsw_sp);
3712err_acl_init:
Yotam Gigi763b4b72016-07-21 12:03:17 +02003713 mlxsw_sp_span_fini(mlxsw_sp);
3714err_span_init:
Ido Schimmel464dce12016-07-02 11:00:15 +02003715 mlxsw_sp_router_fini(mlxsw_sp);
3716err_router_init:
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003717 mlxsw_sp_switchdev_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003718err_switchdev_init:
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003719 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003720err_lag_init:
Jiri Pirko0f433fa2016-04-14 18:19:24 +02003721 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003722err_buffers_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003723 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmela1107482017-05-26 08:37:39 +02003724err_traps_init:
3725 mlxsw_sp_fids_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003726 return err;
3727}
3728
Jiri Pirkob2f10572016-04-08 19:11:23 +02003729static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003730{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003731 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003732
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003733 mlxsw_sp_ports_remove(mlxsw_sp);
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003734 mlxsw_sp_dpipe_fini(mlxsw_sp);
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003735 mlxsw_sp_counter_pool_fini(mlxsw_sp);
Jiri Pirko22a67762017-02-03 10:29:07 +01003736 mlxsw_sp_acl_fini(mlxsw_sp);
Yotam Gigi763b4b72016-07-21 12:03:17 +02003737 mlxsw_sp_span_fini(mlxsw_sp);
Ido Schimmel464dce12016-07-02 11:00:15 +02003738 mlxsw_sp_router_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003739 mlxsw_sp_switchdev_fini(mlxsw_sp);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003740 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko5113bfd2016-05-06 22:20:59 +02003741 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003742 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmela1107482017-05-26 08:37:39 +02003743 mlxsw_sp_fids_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003744}
3745
3746static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3747 .used_max_vepa_channels = 1,
3748 .max_vepa_channels = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003749 .used_max_mid = 1,
Elad Raz53ae6282016-01-10 21:06:26 +01003750 .max_mid = MLXSW_SP_MID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003751 .used_max_pgt = 1,
3752 .max_pgt = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003753 .used_flood_tables = 1,
3754 .used_flood_mode = 1,
3755 .flood_mode = 3,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003756 .max_fid_offset_flood_tables = 3,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003757 .fid_offset_flood_table_size = VLAN_N_VID - 1,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003758 .max_fid_flood_tables = 3,
Ido Schimmela1107482017-05-26 08:37:39 +02003759 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003760 .used_max_ib_mc = 1,
3761 .max_ib_mc = 0,
3762 .used_max_pkey = 1,
3763 .max_pkey = 0,
Nogah Frankel403547d2016-09-20 11:16:52 +02003764 .used_kvd_split_data = 1,
3765 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3766 .kvd_hash_single_parts = 2,
3767 .kvd_hash_double_parts = 1,
Jiri Pirkoc6022422016-07-05 11:27:46 +02003768 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003769 .swid_config = {
3770 {
3771 .used_type = 1,
3772 .type = MLXSW_PORT_SWID_TYPE_ETH,
3773 }
3774 },
Nogah Frankel57d316b2016-07-21 12:03:09 +02003775 .resource_query_enable = 1,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003776};
3777
3778static struct mlxsw_driver mlxsw_sp_driver = {
Jiri Pirko1d20d232016-10-27 15:12:59 +02003779 .kind = mlxsw_sp_driver_name,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003780 .priv_size = sizeof(struct mlxsw_sp),
3781 .init = mlxsw_sp_init,
3782 .fini = mlxsw_sp_fini,
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003783 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003784 .port_split = mlxsw_sp_port_split,
3785 .port_unsplit = mlxsw_sp_port_unsplit,
3786 .sb_pool_get = mlxsw_sp_sb_pool_get,
3787 .sb_pool_set = mlxsw_sp_sb_pool_set,
3788 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3789 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3790 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3791 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3792 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3793 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3794 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3795 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3796 .txhdr_construct = mlxsw_sp_txhdr_construct,
3797 .txhdr_len = MLXSW_TXHDR_LEN,
3798 .profile = &mlxsw_sp_config_profile,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003799};
3800
Jiri Pirko22a67762017-02-03 10:29:07 +01003801bool mlxsw_sp_port_dev_check(const struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003802{
3803 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3804}
3805
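/* Helpers for finding the mlxsw_sp port device underneath stacked uppers
 * (VLAN, LAG, bridge): netdev_walk_all_lower_dev{,_rcu}() walks the lower
 * devices until a port recognized by mlxsw_sp_port_dev_check() is found.
 */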
Jiri Pirko1182e532017-03-06 21:25:20 +01003806static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
David Aherndd823642016-10-17 19:15:49 -07003807{
Jiri Pirko1182e532017-03-06 21:25:20 +01003808 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
David Aherndd823642016-10-17 19:15:49 -07003809 int ret = 0;
3810
3811 if (mlxsw_sp_port_dev_check(lower_dev)) {
Jiri Pirko1182e532017-03-06 21:25:20 +01003812 *p_mlxsw_sp_port = netdev_priv(lower_dev);
David Aherndd823642016-10-17 19:15:49 -07003813 ret = 1;
3814 }
3815
3816 return ret;
3817}
3818
Ido Schimmelc57529e2017-05-26 08:37:31 +02003819struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003820{
Jiri Pirko1182e532017-03-06 21:25:20 +01003821 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003822
3823 if (mlxsw_sp_port_dev_check(dev))
3824 return netdev_priv(dev);
3825
Jiri Pirko1182e532017-03-06 21:25:20 +01003826 mlxsw_sp_port = NULL;
3827 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003828
Jiri Pirko1182e532017-03-06 21:25:20 +01003829 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003830}
3831
Ido Schimmel4724ba562017-03-10 08:53:39 +01003832struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003833{
3834 struct mlxsw_sp_port *mlxsw_sp_port;
3835
3836 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3837 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3838}
3839
Arkadi Sharshevskyaf0613782017-06-08 08:44:20 +02003840struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003841{
Jiri Pirko1182e532017-03-06 21:25:20 +01003842 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003843
3844 if (mlxsw_sp_port_dev_check(dev))
3845 return netdev_priv(dev);
3846
Jiri Pirko1182e532017-03-06 21:25:20 +01003847 mlxsw_sp_port = NULL;
3848 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3849 &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003850
Jiri Pirko1182e532017-03-06 21:25:20 +01003851 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003852}
3853
3854struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3855{
3856 struct mlxsw_sp_port *mlxsw_sp_port;
3857
3858 rcu_read_lock();
3859 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3860 if (mlxsw_sp_port)
3861 dev_hold(mlxsw_sp_port->dev);
3862 rcu_read_unlock();
3863 return mlxsw_sp_port;
3864}
3865
3866void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3867{
3868 dev_put(mlxsw_sp_port->dev);
3869}
3870
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003871static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003872{
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003873 char sldr_pl[MLXSW_REG_SLDR_LEN];
3874
3875 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3876 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3877}
3878
3879static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3880{
3881 char sldr_pl[MLXSW_REG_SLDR_LEN];
3882
3883 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3884 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3885}
3886
3887static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3888 u16 lag_id, u8 port_index)
3889{
3890 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3891 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3892
3893 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3894 lag_id, port_index);
3895 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3896}
3897
3898static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3899 u16 lag_id)
3900{
3901 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3902 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3903
3904 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3905 lag_id);
3906 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3907}
3908
3909static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3910 u16 lag_id)
3911{
3912 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3913 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3914
3915 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3916 lag_id);
3917 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3918}
3919
3920static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3921 u16 lag_id)
3922{
3923 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3924 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3925
3926 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3927 lag_id);
3928 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3929}
3930
3931static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3932 struct net_device *lag_dev,
3933 u16 *p_lag_id)
3934{
3935 struct mlxsw_sp_upper *lag;
3936 int free_lag_id = -1;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003937 u64 max_lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003938 int i;
3939
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003940 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3941 for (i = 0; i < max_lag; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003942 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3943 if (lag->ref_count) {
3944 if (lag->dev == lag_dev) {
3945 *p_lag_id = i;
3946 return 0;
3947 }
3948 } else if (free_lag_id < 0) {
3949 free_lag_id = i;
3950 }
3951 }
3952 if (free_lag_id < 0)
3953 return -EBUSY;
3954 *p_lag_id = free_lag_id;
3955 return 0;
3956}
3957
3958static bool
3959mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3960 struct net_device *lag_dev,
3961 struct netdev_lag_upper_info *lag_upper_info)
3962{
3963 u16 lag_id;
3964
3965 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3966 return false;
3967 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3968 return false;
3969 return true;
3970}
3971
3972static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3973 u16 lag_id, u8 *p_port_index)
3974{
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003975 u64 max_lag_members;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003976 int i;
3977
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003978 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3979 MAX_LAG_MEMBERS);
3980 for (i = 0; i < max_lag_members; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003981 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3982 *p_port_index = i;
3983 return 0;
3984 }
3985 }
3986 return -EBUSY;
3987}
3988
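/* Joining a LAG: look up (or allocate) the LAG ID for the upper device,
 * create the LAG in hardware on first use, add the port as a collector
 * port and enable it, then record the mapping in the core. The default
 * VLAN's router interface, if any, is released since the port is no
 * longer usable as a router interface.
 */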
3989static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3990 struct net_device *lag_dev)
3991{
3992 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmelc57529e2017-05-26 08:37:31 +02003993 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003994 struct mlxsw_sp_upper *lag;
3995 u16 lag_id;
3996 u8 port_index;
3997 int err;
3998
3999 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4000 if (err)
4001 return err;
4002 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4003 if (!lag->ref_count) {
4004 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4005 if (err)
4006 return err;
4007 lag->dev = lag_dev;
4008 }
4009
4010 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4011 if (err)
4012 return err;
4013 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4014 if (err)
4015 goto err_col_port_add;
4016 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
4017 if (err)
4018 goto err_col_port_enable;
4019
4020 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4021 mlxsw_sp_port->local_port);
4022 mlxsw_sp_port->lag_id = lag_id;
4023 mlxsw_sp_port->lagged = 1;
4024 lag->ref_count++;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02004025
Ido Schimmelc57529e2017-05-26 08:37:31 +02004026 /* Port is no longer usable as a router interface */
4027 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
4028 if (mlxsw_sp_port_vlan->fid)
Ido Schimmela1107482017-05-26 08:37:39 +02004029 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel86bf95b2016-07-02 11:00:11 +02004030
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004031 return 0;
4032
Ido Schimmel51554db2016-05-06 22:18:39 +02004033err_col_port_enable:
4034 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004035err_col_port_add:
4036 if (!lag->ref_count)
4037 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004038 return err;
4039}
4040
Ido Schimmel82e6db02016-06-20 23:04:04 +02004041static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4042 struct net_device *lag_dev)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004043{
4044 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004045 u16 lag_id = mlxsw_sp_port->lag_id;
Ido Schimmel1c800752016-06-20 23:04:20 +02004046 struct mlxsw_sp_upper *lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004047
4048 if (!mlxsw_sp_port->lagged)
Ido Schimmel82e6db02016-06-20 23:04:04 +02004049 return;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004050 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4051 WARN_ON(lag->ref_count == 0);
4052
Ido Schimmel82e6db02016-06-20 23:04:04 +02004053 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
4054 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004055
Ido Schimmelc57529e2017-05-26 08:37:31 +02004056 /* Any VLANs configured on the port are no longer valid */
4057 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01004058
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02004059 if (lag->ref_count == 1)
Ido Schimmel82e6db02016-06-20 23:04:04 +02004060 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004061
4062 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4063 mlxsw_sp_port->local_port);
4064 mlxsw_sp_port->lagged = 0;
4065 lag->ref_count--;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02004066
Ido Schimmelc57529e2017-05-26 08:37:31 +02004067 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
4068 /* Make sure untagged frames are allowed to ingress */
4069 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004070}
4071
Jiri Pirko74581202015-12-03 12:12:30 +01004072static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4073 u16 lag_id)
4074{
4075 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4076 char sldr_pl[MLXSW_REG_SLDR_LEN];
4077
4078 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4079 mlxsw_sp_port->local_port);
4080 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4081}
4082
4083static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4084 u16 lag_id)
4085{
4086 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4087 char sldr_pl[MLXSW_REG_SLDR_LEN];
4088
4089 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4090 mlxsw_sp_port->local_port);
4091 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4092}
4093
4094static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
4095 bool lag_tx_enabled)
4096{
4097 if (lag_tx_enabled)
4098 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
4099 mlxsw_sp_port->lag_id);
4100 else
4101 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4102 mlxsw_sp_port->lag_id);
4103}
4104
4105static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4106 struct netdev_lag_lower_state_info *info)
4107{
4108 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
4109}
4110
Jiri Pirko2b94e582017-04-18 16:55:37 +02004111static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4112 bool enable)
4113{
4114 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4115 enum mlxsw_reg_spms_state spms_state;
4116 char *spms_pl;
4117 u16 vid;
4118 int err;
4119
4120 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4121 MLXSW_REG_SPMS_STATE_DISCARDING;
4122
4123 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4124 if (!spms_pl)
4125 return -ENOMEM;
4126 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4127
4128 for (vid = 0; vid < VLAN_N_VID; vid++)
4129 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4130
4131 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4132 kfree(spms_pl);
4133 return err;
4134}
4135
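/* When a port is enslaved to an OVS datapath it is moved to virtual port
 * mode, all VLANs are put in the forwarding STP state and the port is
 * made a member of VIDs 2..(VLAN_N_VID - 1); mlxsw_sp_port_ovs_leave()
 * reverses these steps.
 */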
4136static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4137{
4138 int err;
4139
Ido Schimmel4aafc362017-05-26 08:37:25 +02004140 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004141 if (err)
4142 return err;
Ido Schimmel4aafc362017-05-26 08:37:25 +02004143 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4144 if (err)
4145 goto err_port_stp_set;
Jiri Pirko2b94e582017-04-18 16:55:37 +02004146 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4147 true, false);
4148 if (err)
4149 goto err_port_vlan_set;
4150 return 0;
4151
4152err_port_vlan_set:
4153 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
Ido Schimmel4aafc362017-05-26 08:37:25 +02004154err_port_stp_set:
4155 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004156 return err;
4157}
4158
4159static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4160{
4161 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4162 false, false);
4163 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
Ido Schimmel4aafc362017-05-26 08:37:25 +02004164 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004165}
4166
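/* Netdev notifier handling for physical ports: PRECHANGEUPPER vetoes
 * unsupported topologies (only VLAN, LAG, bridge and OVS uppers are
 * allowed, a LAG upper must use hash TX and may not be mixed with port
 * VLANs), while CHANGEUPPER performs the actual bridge/LAG/OVS join or
 * leave.
 */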
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}

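/* Reflect lower state changes reported for a LAG member port to the device. */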
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

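/* Demultiplex netdevice events for a single front-panel port. */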
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

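/* Propagate an event on a LAG device to each mlxsw_sp member port. */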
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

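/* Handle upper device events for a VLAN device on top of a front-panel port.
 * A bridge master is the only upper device allowed here.
 */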
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper_dev;
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

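/* Handle upper device events for a VLAN device on top of a LAG by propagating
 * them to each mlxsw_sp member port.
 */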
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

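/* Dispatch an event on a VLAN device according to its real device: either a
 * front-panel port or a LAG.
 */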
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);

	return 0;
}

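/* Return true if the (PRE)CHANGEUPPER event concerns an L3 master (VRF)
 * upper device.
 */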
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

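/* Top-level netdevice notifier: route the event to the router port, VRF,
 * port, LAG or VLAN handler and convert the result with notifier_from_errno().
 */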
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

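/* Notifier blocks registered in mlxsw_sp_module_init() below. */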
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

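/* PCI IDs served by this driver; also exported through MODULE_DEVICE_TABLE()
 * below for module autoloading.
 */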
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

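/* Register the notifier blocks first, then the core driver and the PCI
 * driver; the error path unwinds in reverse order of registration.
 */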
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

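/* Tear down in reverse order of mlxsw_sp_module_init(). */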
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);