/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "../mlxfw/mlxfw.h"

#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1420
#define MLXSW_FWREV_SUBMINOR 122

static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

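/* A minimal sketch of how the fields above are combined for the control
 * packets this driver sends (mlxsw_sp_txhdr_construct() further down packs
 * exactly this combination):
 *
 *	version = 1, ctl = Ethernet control, proto = Ethernet, swid = 0,
 *	control_tclass = 1, port_mid = egress local port, type = control
 *
 * Data packets would instead use the data ctl/type values and rely on
 * fid/fid_valid for the L2 forwarding lookup.
 */
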
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release
};

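/* A rough sketch of the order in which the mlxfw core is expected to invoke
 * the ops above for each component of the .mfa2 file:
 *
 *	fsm_lock()		acquire an update handle (MCC)
 *	component_query()	query max size/alignment (MCQI)
 *	fsm_component_update()	announce the component (MCC)
 *	fsm_block_download()	push the data blocks (MCDA)
 *	fsm_component_verify()	let the device verify it (MCC)
 *	fsm_activate()		activate the new image (MCC)
 *	fsm_release()		release the handle (MCC)
 *
 * fsm_query_state() is polled between steps and fsm_cancel() is the error
 * path; the exact sequencing is owned by mlxfw_firmware_flash() below.
 */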
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}

static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
			       const struct mlxsw_fw_rev *b)
{
	if (a->major != b->major)
		return a->major > b->major;
	if (a->minor != b->minor)
		return a->minor > b->minor;
	return a->subminor >= b->subminor;
}

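/* Example of the comparison above: with the supported revision 13.1420.122,
 * a device running 13.1500.0 satisfies mlxsw_sp_fw_rev_ge() (the minor number
 * decides before the subminor is looked at), while 13.1420.121 or 12.x.y does
 * not and triggers the firmware flash below.
 */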
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

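/* Typical usage of the flow counter helpers above (a sketch; the ACL code,
 * for example, follows this pattern): allocate an index, read it periodically
 * and free it when the owning rule goes away:
 *
 *	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	...
 *	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					&packets, &bytes);
 *	...
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 *
 * The counter is cleared at allocation time so the first read starts from
 * zero.
 */
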
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

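/* The SPAN entry helpers above implement a small find-or-create scheme keyed
 * by the mirror target port: _get() reuses an existing entry and bumps its
 * ref_count, or creates a fresh one with ref_count = 1, while _put() tears
 * the entry down once the last reference is dropped.
 * mlxsw_sp_span_mirror_add() below takes the reference and
 * mlxsw_sp_span_inspected_port_unbind() releases it.
 */
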
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

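/* A worked example of the sizing above, assuming a 1514 byte MTU: the egress
 * mirror buffer is sized to mlxsw_sp_bytes_cells(mlxsw_sp, 3785) + 1 cells,
 * i.e. roughly two and a half MTUs worth of shared buffer (plus one cell for
 * rounding) reserved for the egress-mirrored port.
 */
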
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

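/* Note on the PVID handling above: a PVID of 0 is treated as "no PVID",
 * i.e. untagged traffic is no longer accepted on the port (via the SPAFT
 * register) rather than being mapped to a VLAN, while any other value
 * programs SPVID first and only then re-allows untagged traffic, rolling
 * back to the previous PVID on failure.
 */
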
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

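/* A worked example of the headroom math above, assuming a 1500 byte MTU: the
 * per-PG threshold is 2 * bytes_cells(1500) cells. With PFC enabled and a PFC
 * delay of d (treated as bit times here), the extra delay allowance is
 * 2 * bytes_cells(DIV_ROUND_UP(d, 8)) + bytes_cells(1500) cells, while with
 * plain pause frames the fixed MLXSW_SP_PAUSE_DELAY (58752 bytes) is
 * converted to cells instead. Lossy PGs get a threshold but no delay
 * allowance.
 */
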
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

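/* Example of the range handling above: a request to make the port a member of
 * VLANs 1-4094 is chopped into chunks of MLXSW_REG_SPVM_REC_MAX_COUNT records,
 * so it becomes a handful of SPVM writes rather than one register access per
 * VID; a single-VID call (vid_begin == vid_end), as issued by the port VLAN
 * code below, results in exactly one write.
 */
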
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return mlxsw_sp_port_vlan;

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}

void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}

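/* Lifecycle sketch for the {port, VID} objects above: mlxsw_sp_port_vlan_get()
 * returns an existing entry or creates one (joining the VLAN in hardware),
 * and mlxsw_sp_port_vlan_put() detaches the entry from its bridge port or
 * router FID, if any, before destroying it and leaving the VLAN.
 * mlxsw_sp_port_add_vid() and mlxsw_sp_port_kill_vid() below are direct
 * callers.
 */
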
Ido Schimmel05978482016-08-17 16:39:30 +02001462static int mlxsw_sp_port_add_vid(struct net_device *dev,
1463 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001464{
1465 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001466
1467 /* VLAN 0 is added to HW filter when device goes up, but it is
1468 * reserved in our case, so simply return.
1469 */
1470 if (!vid)
1471 return 0;
1472
Ido Schimmelc57529e2017-05-26 08:37:31 +02001473 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001474}
1475
Ido Schimmel32d863f2016-07-02 11:00:10 +02001476static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1477 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001478{
1479 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001480 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001481
1482 /* VLAN 0 is removed from HW filter when device goes down, but
1483 * it is reserved in our case, so simply return.
1484 */
1485 if (!vid)
1486 return 0;
1487
Ido Schimmel31a08a52017-05-26 08:37:26 +02001488 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001489 if (!mlxsw_sp_port_vlan)
Ido Schimmel31a08a52017-05-26 08:37:26 +02001490 return 0;
Ido Schimmelc57529e2017-05-26 08:37:31 +02001491 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001492
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001493 return 0;
1494}
1495
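/* Physical port names are derived from the module mapping: "p<module+1>"
 * for a regular port and "p<module+1>s<subport>" for a split port, where
 * the subport index is the port's first lane divided by its width.
 */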
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001496static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1497 size_t len)
1498{
1499 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmeld664b412016-06-09 09:51:40 +02001500 u8 module = mlxsw_sp_port->mapping.module;
1501 u8 width = mlxsw_sp_port->mapping.width;
1502 u8 lane = mlxsw_sp_port->mapping.lane;
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001503 int err;
1504
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001505 if (!mlxsw_sp_port->split)
1506 err = snprintf(name, len, "p%d", module + 1);
1507 else
1508 err = snprintf(name, len, "p%ds%d", module + 1,
1509 lane / width);
1510
1511 if (err >= len)
1512 return -EINVAL;
1513
1514 return 0;
1515}
1516
Yotam Gigi763b4b72016-07-21 12:03:17 +02001517static struct mlxsw_sp_port_mall_tc_entry *
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001518mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
 1519					 unsigned long cookie)
{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001520 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1521
1522 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1523 if (mall_tc_entry->cookie == cookie)
1524 return mall_tc_entry;
1525
1526 return NULL;
1527}
1528
1529static int
1530mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001531 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001532 const struct tc_action *a,
1533 bool ingress)
1534{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001535 struct net *net = dev_net(mlxsw_sp_port->dev);
1536 enum mlxsw_sp_span_type span_type;
1537 struct mlxsw_sp_port *to_port;
1538 struct net_device *to_dev;
1539 int ifindex;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001540
1541 ifindex = tcf_mirred_ifindex(a);
1542 to_dev = __dev_get_by_index(net, ifindex);
1543 if (!to_dev) {
1544 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1545 return -EINVAL;
1546 }
1547
1548 if (!mlxsw_sp_port_dev_check(to_dev)) {
 1549		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
Yotam Gigie915ac62017-01-09 11:25:48 +01001550 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001551 }
1552 to_port = netdev_priv(to_dev);
1553
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001554 mirror->to_local_port = to_port->local_port;
1555 mirror->ingress = ingress;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001556 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001557 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1558}
Yotam Gigi763b4b72016-07-21 12:03:17 +02001559
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001560static void
1561mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1562 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1563{
1564 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1565 enum mlxsw_sp_span_type span_type;
1566 struct mlxsw_sp_port *to_port;
1567
1568 to_port = mlxsw_sp->ports[mirror->to_local_port];
1569 span_type = mirror->ingress ?
1570 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1571 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001572}
1573
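/* Only a single packet sampler can be active on a port at a time and the
 * sampling rate is bounded by MLXSW_REG_MPSC_RATE_MAX. The psample group
 * pointer is published under RCU before sampling is enabled in hardware.
 */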
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001574static int
1575mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1576 struct tc_cls_matchall_offload *cls,
1577 const struct tc_action *a,
1578 bool ingress)
1579{
1580 int err;
1581
1582 if (!mlxsw_sp_port->sample)
1583 return -EOPNOTSUPP;
1584 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1585 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1586 return -EEXIST;
1587 }
1588 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1589 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1590 return -EOPNOTSUPP;
1591 }
1592
1593 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1594 tcf_sample_psample_group(a));
1595 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1596 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1597 mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1598
1599 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1600 if (err)
1601 goto err_port_sample_set;
1602 return 0;
1603
1604err_port_sample_set:
1605 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1606 return err;
1607}
1608
1609static void
1610mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1611{
1612 if (!mlxsw_sp_port->sample)
1613 return;
1614
1615 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1616 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1617}
1618
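/* matchall offload supports exactly one action per filter: either an
 * egress mirror (mirred) or a sample action, and only for filters that
 * match all protocols (ETH_P_ALL).
 */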
Yotam Gigi763b4b72016-07-21 12:03:17 +02001619static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001620 struct tc_cls_matchall_offload *f,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001621 bool ingress)
1622{
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001623 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001624 __be16 protocol = f->common.protocol;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001625 const struct tc_action *a;
WANG Cong22dc13c2016-08-13 22:35:00 -07001626 LIST_HEAD(actions);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001627 int err;
1628
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001629 if (!tcf_exts_has_one_action(f->exts)) {
Yotam Gigi763b4b72016-07-21 12:03:17 +02001630		netdev_err(mlxsw_sp_port->dev, "only a single action is supported\n");
Yotam Gigie915ac62017-01-09 11:25:48 +01001631 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001632 }
1633
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001634 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1635 if (!mall_tc_entry)
1636 return -ENOMEM;
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001637 mall_tc_entry->cookie = f->cookie;
Ido Schimmel86cb13e2016-07-25 13:12:33 +03001638
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001639 tcf_exts_to_list(f->exts, &actions);
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001640 a = list_first_entry(&actions, struct tc_action, list);
1641
1642 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1643 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1644
1645 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1646 mirror = &mall_tc_entry->mirror;
1647 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1648 mirror, a, ingress);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001649 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1650 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001651 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001652 a, ingress);
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001653 } else {
1654 err = -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001655 }
1656
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001657 if (err)
1658 goto err_add_action;
1659
1660 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001661 return 0;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001662
1663err_add_action:
1664 kfree(mall_tc_entry);
1665 return err;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001666}
1667
1668static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001669 struct tc_cls_matchall_offload *f)
Yotam Gigi763b4b72016-07-21 12:03:17 +02001670{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001671 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001672
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001673 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001674 f->cookie);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001675 if (!mall_tc_entry) {
1676 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1677 return;
1678 }
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001679 list_del(&mall_tc_entry->list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001680
1681 switch (mall_tc_entry->type) {
1682 case MLXSW_SP_PORT_MALL_MIRROR:
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001683 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1684 &mall_tc_entry->mirror);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001685 break;
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001686 case MLXSW_SP_PORT_MALL_SAMPLE:
1687 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1688 break;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001689 default:
1690 WARN_ON(1);
1691 }
1692
Yotam Gigi763b4b72016-07-21 12:03:17 +02001693 kfree(mall_tc_entry);
1694}
1695
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001696static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001697 struct tc_cls_matchall_offload *f)
Yotam Gigi763b4b72016-07-21 12:03:17 +02001698{
Jiri Pirkoa2e8da92017-08-09 14:30:33 +02001699 bool ingress;
1700
1701 if (is_classid_clsact_ingress(f->common.classid))
1702 ingress = true;
1703 else if (is_classid_clsact_egress(f->common.classid))
1704 ingress = false;
1705 else
1706 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001707
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001708 if (f->common.chain_index)
Jiri Pirkoa5fcf8a2017-06-06 17:00:16 +02001709 return -EOPNOTSUPP;
1710
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001711 switch (f->command) {
1712 case TC_CLSMATCHALL_REPLACE:
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001713 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001714 ingress);
1715 case TC_CLSMATCHALL_DESTROY:
1716 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1717 return 0;
1718 default:
1719 return -EOPNOTSUPP;
1720 }
1721}
1722
1723static int
1724mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001725 struct tc_cls_flower_offload *f)
1726{
Jiri Pirkoa2e8da92017-08-09 14:30:33 +02001727 bool ingress;
1728
1729 if (is_classid_clsact_ingress(f->common.classid))
1730 ingress = true;
1731 else if (is_classid_clsact_egress(f->common.classid))
1732 ingress = false;
1733 else
1734 return -EOPNOTSUPP;
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001735
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001736 switch (f->command) {
1737 case TC_CLSFLOWER_REPLACE:
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001738 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001739 case TC_CLSFLOWER_DESTROY:
1740 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
1741 return 0;
1742 case TC_CLSFLOWER_STATS:
1743 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
1744 default:
1745 return -EOPNOTSUPP;
1746 }
1747}
1748
1749static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02001750 void *type_data)
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001751{
1752 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1753
Jiri Pirko2572ac52017-08-07 10:15:17 +02001754 switch (type) {
Jiri Pirkoade9b652017-08-07 10:15:18 +02001755 case TC_SETUP_CLSMATCHALL:
Jiri Pirkode4784c2017-08-07 10:15:32 +02001756 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
Jiri Pirko7aa0f5a2017-02-03 10:29:09 +01001757 case TC_SETUP_CLSFLOWER:
Jiri Pirkode4784c2017-08-07 10:15:32 +02001758 return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
Jiri Pirko2572ac52017-08-07 10:15:17 +02001759 default:
1760 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001761 }
Yotam Gigi763b4b72016-07-21 12:03:17 +02001762}
1763
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001764static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1765 .ndo_open = mlxsw_sp_port_open,
1766 .ndo_stop = mlxsw_sp_port_stop,
1767 .ndo_start_xmit = mlxsw_sp_port_xmit,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001768 .ndo_setup_tc = mlxsw_sp_setup_tc,
Jiri Pirkoc5b9b512015-12-03 12:12:22 +01001769 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001770 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1771 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1772 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001773 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1774 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001775 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1776 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001777 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001778};
1779
1780static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1781 struct ethtool_drvinfo *drvinfo)
1782{
1783 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1784 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1785
1786 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1787 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1788 sizeof(drvinfo->version));
1789 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1790 "%d.%d.%d",
1791 mlxsw_sp->bus_info->fw_rev.major,
1792 mlxsw_sp->bus_info->fw_rev.minor,
1793 mlxsw_sp->bus_info->fw_rev.subminor);
1794 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1795 sizeof(drvinfo->bus_info));
1796}
1797
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001798static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1799 struct ethtool_pauseparam *pause)
1800{
1801 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1802
1803 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1804 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1805}
1806
1807static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1808 struct ethtool_pauseparam *pause)
1809{
1810 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1811
1812 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1813 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1814 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1815
1816 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1817 pfcc_pl);
1818}
1819
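/* PAUSE and PFC are mutually exclusive, so reject the request if PFC is
 * already enabled on the port. The port headroom is sized for the
 * requested PAUSE settings and current MTU before the configuration is
 * written to the device, and restored if the write fails.
 */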
1820static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1821 struct ethtool_pauseparam *pause)
1822{
1823 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1824 bool pause_en = pause->tx_pause || pause->rx_pause;
1825 int err;
1826
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001827 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1828 netdev_err(dev, "PFC already enabled on port\n");
1829 return -EINVAL;
1830 }
1831
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001832 if (pause->autoneg) {
1833 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1834 return -EINVAL;
1835 }
1836
1837 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1838 if (err) {
1839 netdev_err(dev, "Failed to configure port's headroom\n");
1840 return err;
1841 }
1842
1843 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1844 if (err) {
1845 netdev_err(dev, "Failed to set PAUSE parameters\n");
1846 goto err_port_pause_configure;
1847 }
1848
1849 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1850 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1851
1852 return 0;
1853
1854err_port_pause_configure:
1855 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1856 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1857 return err;
1858}
1859
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001860struct mlxsw_sp_port_hw_stats {
1861 char str[ETH_GSTRING_LEN];
Jiri Pirko412791d2016-10-21 16:07:19 +02001862 u64 (*getter)(const char *payload);
Ido Schimmel18281f22017-03-24 08:02:51 +01001863 bool cells_bytes;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001864};
1865
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001866static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001867 {
1868 .str = "a_frames_transmitted_ok",
1869 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1870 },
1871 {
1872 .str = "a_frames_received_ok",
1873 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1874 },
1875 {
1876 .str = "a_frame_check_sequence_errors",
1877 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1878 },
1879 {
1880 .str = "a_alignment_errors",
1881 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1882 },
1883 {
1884 .str = "a_octets_transmitted_ok",
1885 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1886 },
1887 {
1888 .str = "a_octets_received_ok",
1889 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1890 },
1891 {
1892 .str = "a_multicast_frames_xmitted_ok",
1893 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1894 },
1895 {
1896 .str = "a_broadcast_frames_xmitted_ok",
1897 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1898 },
1899 {
1900 .str = "a_multicast_frames_received_ok",
1901 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1902 },
1903 {
1904 .str = "a_broadcast_frames_received_ok",
1905 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1906 },
1907 {
1908 .str = "a_in_range_length_errors",
1909 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1910 },
1911 {
1912 .str = "a_out_of_range_length_field",
1913 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1914 },
1915 {
1916 .str = "a_frame_too_long_errors",
1917 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1918 },
1919 {
1920 .str = "a_symbol_error_during_carrier",
1921 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1922 },
1923 {
1924 .str = "a_mac_control_frames_transmitted",
1925 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1926 },
1927 {
1928 .str = "a_mac_control_frames_received",
1929 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1930 },
1931 {
1932 .str = "a_unsupported_opcodes_received",
1933 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1934 },
1935 {
1936 .str = "a_pause_mac_ctrl_frames_received",
1937 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1938 },
1939 {
1940 .str = "a_pause_mac_ctrl_frames_xmitted",
1941 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1942 },
1943};
1944
1945#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1946
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001947static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1948 {
1949 .str = "rx_octets_prio",
1950 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1951 },
1952 {
1953 .str = "rx_frames_prio",
1954 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1955 },
1956 {
1957 .str = "tx_octets_prio",
1958 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1959 },
1960 {
1961 .str = "tx_frames_prio",
1962 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1963 },
1964 {
1965 .str = "rx_pause_prio",
1966 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1967 },
1968 {
1969 .str = "rx_pause_duration_prio",
1970 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1971 },
1972 {
1973 .str = "tx_pause_prio",
1974 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1975 },
1976 {
1977 .str = "tx_pause_duration_prio",
1978 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1979 },
1980};
1981
1982#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1983
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001984static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1985 {
1986 .str = "tc_transmit_queue_tc",
Ido Schimmel18281f22017-03-24 08:02:51 +01001987 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
1988 .cells_bytes = true,
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001989 },
1990 {
1991 .str = "tc_no_buffer_discard_uc_tc",
1992 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1993 },
1994};
1995
1996#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1997
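/* The ethtool statistics consist of the IEEE 802.3 counters followed by
 * one copy of the per-priority and per-TC counter sets for each of the
 * IEEE_8021QAZ_MAX_TCS priorities / traffic classes.
 */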
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001998#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001999 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
2000 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002001 IEEE_8021QAZ_MAX_TCS)
2002
2003static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2004{
2005 int i;
2006
2007 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2008 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2009 mlxsw_sp_port_hw_prio_stats[i].str, prio);
2010 *p += ETH_GSTRING_LEN;
2011 }
2012}
2013
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002014static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2015{
2016 int i;
2017
2018 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2019 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2020 mlxsw_sp_port_hw_tc_stats[i].str, tc);
2021 *p += ETH_GSTRING_LEN;
2022 }
2023}
2024
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002025static void mlxsw_sp_port_get_strings(struct net_device *dev,
2026 u32 stringset, u8 *data)
2027{
2028 u8 *p = data;
2029 int i;
2030
2031 switch (stringset) {
2032 case ETH_SS_STATS:
2033 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2034 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2035 ETH_GSTRING_LEN);
2036 p += ETH_GSTRING_LEN;
2037 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002038
2039 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2040 mlxsw_sp_port_get_prio_strings(&p, i);
2041
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002042 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2043 mlxsw_sp_port_get_tc_strings(&p, i);
2044
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002045 break;
2046 }
2047}
2048
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002049static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2050 enum ethtool_phys_id_state state)
2051{
2052 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2053 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2054 char mlcr_pl[MLXSW_REG_MLCR_LEN];
2055 bool active;
2056
2057 switch (state) {
2058 case ETHTOOL_ID_ACTIVE:
2059 active = true;
2060 break;
2061 case ETHTOOL_ID_INACTIVE:
2062 active = false;
2063 break;
2064 default:
2065 return -EOPNOTSUPP;
2066 }
2067
2068 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2069 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2070}
2071
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002072static int
2073mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2074 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2075{
2076 switch (grp) {
2077 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2078 *p_hw_stats = mlxsw_sp_port_hw_stats;
2079 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2080 break;
2081 case MLXSW_REG_PPCNT_PRIO_CNT:
2082 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2083 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2084 break;
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002085 case MLXSW_REG_PPCNT_TC_CNT:
2086 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2087 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2088 break;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002089 default:
2090 WARN_ON(1);
Yotam Gigie915ac62017-01-09 11:25:48 +01002091 return -EOPNOTSUPP;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002092 }
2093 return 0;
2094}
2095
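/* Read one PPCNT counter group from the device and copy it into the
 * ethtool data array. Counters flagged as cells_bytes are reported by
 * the hardware in cell units and are converted to bytes here.
 */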
2096static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2097 enum mlxsw_reg_ppcnt_grp grp, int prio,
2098 u64 *data, int data_index)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002099{
Ido Schimmel18281f22017-03-24 08:02:51 +01002100 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2101 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002102 struct mlxsw_sp_port_hw_stats *hw_stats;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002103 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002104 int i, len;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002105 int err;
2106
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002107 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2108 if (err)
2109 return;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002110 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
Ido Schimmel18281f22017-03-24 08:02:51 +01002111 for (i = 0; i < len; i++) {
Colin Ian Kingfaac0ff2016-09-23 12:02:45 +01002112 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
Ido Schimmel18281f22017-03-24 08:02:51 +01002113 if (!hw_stats[i].cells_bytes)
2114 continue;
2115 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2116 data[data_index + i]);
2117 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002118}
2119
2120static void mlxsw_sp_port_get_stats(struct net_device *dev,
2121 struct ethtool_stats *stats, u64 *data)
2122{
2123 int i, data_index = 0;
2124
2125 /* IEEE 802.3 Counters */
2126 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2127 data, data_index);
2128 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2129
2130 /* Per-Priority Counters */
2131 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2132 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2133 data, data_index);
2134 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2135 }
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002136
2137 /* Per-TC Counters */
2138 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2139 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2140 data, data_index);
2141 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2142 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002143}
2144
2145static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2146{
2147 switch (sset) {
2148 case ETH_SS_STATS:
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002149 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002150 default:
2151 return -EOPNOTSUPP;
2152 }
2153}
2154
2155struct mlxsw_sp_port_link_mode {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002156 enum ethtool_link_mode_bit_indices mask_ethtool;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002157 u32 mask;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002158 u32 speed;
2159};
2160
2161static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2162 {
2163 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002164 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2165 .speed = SPEED_100,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002166 },
2167 {
2168 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2169 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002170 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2171 .speed = SPEED_1000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002172 },
2173 {
2174 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002175 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2176 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002177 },
2178 {
2179 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2180 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002181 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2182 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002183 },
2184 {
2185 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2186 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2187 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2188 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002189 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2190 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002191 },
2192 {
2193 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002194 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2195 .speed = SPEED_20000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002196 },
2197 {
2198 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002199 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2200 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002201 },
2202 {
2203 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002204 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2205 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002206 },
2207 {
2208 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002209 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2210 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002211 },
2212 {
2213 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002214 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2215 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002216 },
2217 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002218 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2219 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2220 .speed = SPEED_25000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002221 },
2222 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002223 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2224 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2225 .speed = SPEED_25000,
2226 },
2227 {
2228 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2229 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2230 .speed = SPEED_25000,
2231 },
2237 {
2238 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2239 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2240 .speed = SPEED_50000,
2241 },
2242 {
2243 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2244 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2245 .speed = SPEED_50000,
2246 },
2247 {
2248 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2249 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2250 .speed = SPEED_50000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002251 },
2252 {
2253 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002254 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2255 .speed = SPEED_56000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002256 },
2257 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002258 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2259 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2260 .speed = SPEED_56000,
2261 },
2262 {
2263 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2264 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2265 .speed = SPEED_56000,
2266 },
2267 {
2268 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2269 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2270 .speed = SPEED_56000,
2271 },
2272 {
2273 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2274 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2275 .speed = SPEED_100000,
2276 },
2277 {
2278 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2279 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2280 .speed = SPEED_100000,
2281 },
2282 {
2283 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2284 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2285 .speed = SPEED_100000,
2286 },
2287 {
2288 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2289 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2290 .speed = SPEED_100000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002291 },
2292};
2293
2294#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2295
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002296static void
2297mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2298 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002299{
2300 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2301 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2302 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2303 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2304 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2305 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002306 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002307
2308 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2309 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2310 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2311 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2312 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002313 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002314}
2315
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002316static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002317{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002318 int i;
2319
2320 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2321 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002322 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2323 mode);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002324 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002325}
2326
2327static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002328 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002329{
2330 u32 speed = SPEED_UNKNOWN;
2331 u8 duplex = DUPLEX_UNKNOWN;
2332 int i;
2333
2334 if (!carrier_ok)
2335 goto out;
2336
2337 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2338 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2339 speed = mlxsw_sp_port_link_mode[i].speed;
2340 duplex = DUPLEX_FULL;
2341 break;
2342 }
2343 }
2344out:
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002345 cmd->base.speed = speed;
2346 cmd->base.duplex = duplex;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002347}
2348
2349static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2350{
2351 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2352 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2353 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2354 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2355 return PORT_FIBRE;
2356
2357 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2358 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2359 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2360 return PORT_DA;
2361
2362 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2363 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2364 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2365 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2366 return PORT_NONE;
2367
2368 return PORT_OTHER;
2369}
2370
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002371static u32
2372mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002373{
2374 u32 ptys_proto = 0;
2375 int i;
2376
2377 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002378 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2379 cmd->link_modes.advertising))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002380 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2381 }
2382 return ptys_proto;
2383}
2384
2385static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2386{
2387 u32 ptys_proto = 0;
2388 int i;
2389
2390 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2391 if (speed == mlxsw_sp_port_link_mode[i].speed)
2392 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2393 }
2394 return ptys_proto;
2395}
2396
Ido Schimmel18f1e702016-02-26 17:32:31 +01002397static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2398{
2399 u32 ptys_proto = 0;
2400 int i;
2401
2402 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2403 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2404 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2405 }
2406 return ptys_proto;
2407}
2408
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002409static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2410 struct ethtool_link_ksettings *cmd)
2411{
2412 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2413 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2414 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2415
2416 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2417 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2418}
2419
2420static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2421 struct ethtool_link_ksettings *cmd)
2422{
2423 if (!autoneg)
2424 return;
2425
2426 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2427 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2428}
2429
2430static void
2431mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2432 struct ethtool_link_ksettings *cmd)
2433{
2434 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2435 return;
2436
2437 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2438 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2439}
2440
2441static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2442 struct ethtool_link_ksettings *cmd)
2443{
2444 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2445 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2446 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2447 char ptys_pl[MLXSW_REG_PTYS_LEN];
2448 u8 autoneg_status;
2449 bool autoneg;
2450 int err;
2451
2452 autoneg = mlxsw_sp_port->link.autoneg;
Elad Raz401c8b42016-10-28 21:35:52 +02002453 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002454 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2455 if (err)
2456 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002457 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2458 &eth_proto_oper);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002459
2460 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2461
2462 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2463
2464 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2465 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2466 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2467
2468 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2469 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2470 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2471 cmd);
2472
2473 return 0;
2474}
2475
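/* When autonegotiation is enabled the advertised link modes are mapped to
 * a PTYS mask; otherwise the requested forced speed is. The new admin mask
 * is intersected with the port's capabilities and, if the netdev is
 * running, the port is toggled so the change takes effect.
 */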
2476static int
2477mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2478 const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002479{
2480 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2481 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2482 char ptys_pl[MLXSW_REG_PTYS_LEN];
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002483 u32 eth_proto_cap, eth_proto_new;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002484 bool autoneg;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002485 int err;
2486
Elad Raz401c8b42016-10-28 21:35:52 +02002487 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002488 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002489 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002490 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002491 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002492
2493 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2494 eth_proto_new = autoneg ?
2495 mlxsw_sp_to_ptys_advert_link(cmd) :
2496 mlxsw_sp_to_ptys_speed(cmd->base.speed);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002497
2498 eth_proto_new = eth_proto_new & eth_proto_cap;
2499 if (!eth_proto_new) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002500 netdev_err(dev, "No supported speed requested\n");
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002501 return -EINVAL;
2502 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002503
Elad Raz401c8b42016-10-28 21:35:52 +02002504 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2505 eth_proto_new);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002506 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002507 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002508 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002509
Ido Schimmel6277d462016-07-15 11:14:58 +02002510 if (!netif_running(dev))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002511 return 0;
2512
Ido Schimmel0c83f882016-09-12 13:26:23 +02002513 mlxsw_sp_port->link.autoneg = autoneg;
2514
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002515 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2516 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002517
2518 return 0;
2519}
2520
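/* RTNL is released around the firmware request and flash, with a
 * reference held on the netdev so it cannot go away in the meantime.
 */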
Yotam Gigice6ef68f2017-06-01 16:26:46 +03002521static int mlxsw_sp_flash_device(struct net_device *dev,
2522 struct ethtool_flash *flash)
2523{
2524 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2525 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2526 const struct firmware *firmware;
2527 int err;
2528
2529 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2530 return -EOPNOTSUPP;
2531
2532 dev_hold(dev);
2533 rtnl_unlock();
2534
2535 err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2536 if (err)
2537 goto out;
2538 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2539 release_firmware(firmware);
2540out:
2541 rtnl_lock();
2542 dev_put(dev);
2543 return err;
2544}
2545
Arkadi Sharshevsky2ea10902017-06-14 09:27:40 +02002546#define MLXSW_SP_QSFP_I2C_ADDR 0x50
2547
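/* Read up to MLXSW_SP_REG_MCIA_EEPROM_SIZE bytes of module EEPROM data in
 * a single MCIA register query at I2C address 0x50, returning the number
 * of bytes actually read so callers can iterate over larger ranges.
 */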
2548static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2549 u16 offset, u16 size, void *data,
2550 unsigned int *p_read_size)
2551{
2552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2553 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
2554 char mcia_pl[MLXSW_REG_MCIA_LEN];
2555 int status;
2556 int err;
2557
2558 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
2559 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
2560 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR);
2561
2562 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
2563 if (err)
2564 return err;
2565
2566 status = mlxsw_reg_mcia_status_get(mcia_pl);
2567 if (status)
2568 return -EIO;
2569
2570 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
2571 memcpy(data, eeprom_tmp, size);
2572 *p_read_size = size;
2573
2574 return 0;
2575}
2576
2577enum mlxsw_sp_eeprom_module_info_rev_id {
2578 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
2579 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
2580 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
2581};
2582
2583enum mlxsw_sp_eeprom_module_info_id {
2584 MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
2585 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
2586 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
2587 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
2588};
2589
2590enum mlxsw_sp_eeprom_module_info {
2591 MLXSW_SP_EEPROM_MODULE_INFO_ID,
2592 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
2593 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2594};
2595
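/* The first bytes of the module EEPROM identify the transceiver type:
 * QSFP uses SFF-8436, QSFP+/QSFP28 use SFF-8636 when the revision is new
 * enough (or always for QSFP28), and SFP uses SFF-8472.
 */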
2596static int mlxsw_sp_get_module_info(struct net_device *netdev,
2597 struct ethtool_modinfo *modinfo)
2598{
2599 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2600 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
2601 u8 module_rev_id, module_id;
2602 unsigned int read_size;
2603 int err;
2604
2605 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
2606 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2607 module_info, &read_size);
2608 if (err)
2609 return err;
2610
2611 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
2612 return -EIO;
2613
2614 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
2615 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
2616
2617 switch (module_id) {
2618 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
2619 modinfo->type = ETH_MODULE_SFF_8436;
2620 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2621 break;
2622 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
2623 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
2624 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
2625 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
2626 modinfo->type = ETH_MODULE_SFF_8636;
2627 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2628 } else {
2629 modinfo->type = ETH_MODULE_SFF_8436;
2630 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2631 }
2632 break;
2633 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
2634 modinfo->type = ETH_MODULE_SFF_8472;
2635 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2636 break;
2637 default:
2638 return -EINVAL;
2639 }
2640
2641 return 0;
2642}
2643
2644static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
2645 struct ethtool_eeprom *ee,
2646 u8 *data)
2647{
2648 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2649 int offset = ee->offset;
2650 unsigned int read_size;
2651 int i = 0;
2652 int err;
2653
2654 if (!ee->len)
2655 return -EINVAL;
2656
2657 memset(data, 0, ee->len);
2658
2659 while (i < ee->len) {
2660 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
2661 ee->len - i, data + i,
2662 &read_size);
2663 if (err) {
 2664			netdev_err(mlxsw_sp_port->dev, "EEPROM query failed\n");
2665 return err;
2666 }
2667
2668 i += read_size;
2669 offset += read_size;
2670 }
2671
2672 return 0;
2673}
2674
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002675static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2676 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2677 .get_link = ethtool_op_get_link,
Ido Schimmel9f7ec052016-04-06 17:10:14 +02002678 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2679 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002680 .get_strings = mlxsw_sp_port_get_strings,
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002681 .set_phys_id = mlxsw_sp_port_set_phys_id,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002682 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2683 .get_sset_count = mlxsw_sp_port_get_sset_count,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002684 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2685 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
Yotam Gigice6ef68f2017-06-01 16:26:46 +03002686 .flash_device = mlxsw_sp_flash_device,
Arkadi Sharshevsky2ea10902017-06-14 09:27:40 +02002687 .get_module_info = mlxsw_sp_get_module_info,
2688 .get_module_eeprom = mlxsw_sp_get_module_eeprom,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002689};
2690
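/* Enable every link mode whose speed does not exceed the port's maximum,
 * which is the base lane speed multiplied by the number of lanes (width)
 * assigned to the port.
 */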
Ido Schimmel18f1e702016-02-26 17:32:31 +01002691static int
2692mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2693{
2694 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2695 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2696 char ptys_pl[MLXSW_REG_PTYS_LEN];
2697 u32 eth_proto_admin;
2698
2699 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
Elad Raz401c8b42016-10-28 21:35:52 +02002700 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2701 eth_proto_admin);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002702 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2703}
2704
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002705int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2706 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2707 bool dwrr, u8 dwrr_weight)
Ido Schimmel90183b92016-04-06 17:10:08 +02002708{
2709 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2710 char qeec_pl[MLXSW_REG_QEEC_LEN];
2711
2712 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2713 next_index);
2714 mlxsw_reg_qeec_de_set(qeec_pl, true);
2715 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2716 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2717 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2718}
2719
Ido Schimmelcc7cf512016-04-06 17:10:11 +02002720int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2721 enum mlxsw_reg_qeec_hr hr, u8 index,
2722 u8 next_index, u32 maxrate)
Ido Schimmel90183b92016-04-06 17:10:08 +02002723{
2724 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2725 char qeec_pl[MLXSW_REG_QEEC_LEN];
2726
2727 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2728 next_index);
2729 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2730 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2731 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2732}
2733
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002734int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2735 u8 switch_prio, u8 tclass)
Ido Schimmel90183b92016-04-06 17:10:08 +02002736{
2737 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2738 char qtct_pl[MLXSW_REG_QTCT_LEN];
2739
2740 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2741 tclass);
2742 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2743}
2744
2745static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2746{
2747 int err, i;
2748
 2749	/* Set up the elements hierarchy, so that each TC is linked to
 2750	 * one subgroup, which are all members of the same group.
2751 */
2752 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2753 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2754 0);
2755 if (err)
2756 return err;
2757 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2758 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2759 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2760 0, false, 0);
2761 if (err)
2762 return err;
2763 }
2764 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2765 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2766 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2767 false, 0);
2768 if (err)
2769 return err;
2770 }
2771
 2772	/* Make sure the max shaper is disabled in all hierarchies that
2773 * support it.
2774 */
2775 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2776 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2777 MLXSW_REG_QEEC_MAS_DIS);
2778 if (err)
2779 return err;
2780 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2781 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2782 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2783 i, 0,
2784 MLXSW_REG_QEEC_MAS_DIS);
2785 if (err)
2786 return err;
2787 }
2788 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2789 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2790 MLXSW_REG_QEEC_HIERARCY_TC,
2791 i, i,
2792 MLXSW_REG_QEEC_MAS_DIS);
2793 if (err)
2794 return err;
2795 }
2796
2797 /* Map all priorities to traffic class 0. */
2798 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2799 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2800 if (err)
2801 return err;
2802 }
2803
2804 return 0;
2805}
2806
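/* Create and register a netdev for a single front panel port: map the
 * module and lanes, set the SWID and MAC address, configure MTU, buffers,
 * ETS, DCB and FIDs, and add the default VLAN (VID 1) before registering
 * the netdev and scheduling periodic statistics collection.
 */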
Ido Schimmel5b153852017-06-08 08:47:44 +02002807static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2808 bool split, u8 module, u8 width, u8 lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002809{
Ido Schimmelc57529e2017-05-26 08:37:31 +02002810 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002811 struct mlxsw_sp_port *mlxsw_sp_port;
2812 struct net_device *dev;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002813 int err;
2814
Ido Schimmel5b153852017-06-08 08:47:44 +02002815 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2816 if (err) {
2817 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2818 local_port);
2819 return err;
2820 }
2821
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002822 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
Ido Schimmel5b153852017-06-08 08:47:44 +02002823 if (!dev) {
2824 err = -ENOMEM;
2825 goto err_alloc_etherdev;
2826 }
Jiri Pirkof20a91f2016-10-27 15:13:00 +02002827 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002828 mlxsw_sp_port = netdev_priv(dev);
2829 mlxsw_sp_port->dev = dev;
2830 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2831 mlxsw_sp_port->local_port = local_port;
Ido Schimmelc57529e2017-05-26 08:37:31 +02002832 mlxsw_sp_port->pvid = 1;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002833 mlxsw_sp_port->split = split;
Ido Schimmeld664b412016-06-09 09:51:40 +02002834 mlxsw_sp_port->mapping.module = module;
2835 mlxsw_sp_port->mapping.width = width;
2836 mlxsw_sp_port->mapping.lane = lane;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002837 mlxsw_sp_port->link.autoneg = 1;
Ido Schimmel31a08a52017-05-26 08:37:26 +02002838 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02002839 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002840
2841 mlxsw_sp_port->pcpu_stats =
2842 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2843 if (!mlxsw_sp_port->pcpu_stats) {
2844 err = -ENOMEM;
2845 goto err_alloc_stats;
2846 }
2847
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002848 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2849 GFP_KERNEL);
2850 if (!mlxsw_sp_port->sample) {
2851 err = -ENOMEM;
2852 goto err_alloc_sample;
2853 }
2854
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002855 mlxsw_sp_port->hw_stats.cache =
2856 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2857
2858 if (!mlxsw_sp_port->hw_stats.cache) {
2859 err = -ENOMEM;
2860 goto err_alloc_hw_stats;
2861 }
2862 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2863 &update_stats_cache);
2864
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002865 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2866 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2867
Ido Schimmel2e915e02017-06-08 08:47:45 +02002868 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
Ido Schimmel5b153852017-06-08 08:47:44 +02002869 if (err) {
2870 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2871 mlxsw_sp_port->local_port);
2872 goto err_port_module_map;
2873 }
2874
Ido Schimmel3247ff22016-09-08 08:16:02 +02002875 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2876 if (err) {
2877 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2878 mlxsw_sp_port->local_port);
2879 goto err_port_swid_set;
2880 }
2881
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002882 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2883 if (err) {
2884 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2885 mlxsw_sp_port->local_port);
2886 goto err_dev_addr_init;
2887 }
2888
2889 netif_carrier_off(dev);
2890
2891 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
Yotam Gigi763b4b72016-07-21 12:03:17 +02002892 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2893 dev->hw_features |= NETIF_F_HW_TC;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002894
Jarod Wilsond894be52016-10-20 13:55:16 -04002895 dev->min_mtu = 0;
2896 dev->max_mtu = ETH_MAX_MTU;
2897
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002898	/* Each packet needs to have a Tx header (metadata) on top of all other
2899 * headers.
2900 */
Yotam Gigifeb7d382016-10-04 09:46:04 +02002901 dev->needed_headroom = MLXSW_TXHDR_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002902
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002903 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2904 if (err) {
2905 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2906 mlxsw_sp_port->local_port);
2907 goto err_port_system_port_mapping_set;
2908 }
2909
Ido Schimmel18f1e702016-02-26 17:32:31 +01002910 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2911 if (err) {
2912 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2913 mlxsw_sp_port->local_port);
2914 goto err_port_speed_by_width_set;
2915 }
2916
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002917 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2918 if (err) {
2919 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2920 mlxsw_sp_port->local_port);
2921 goto err_port_mtu_set;
2922 }
2923
2924 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2925 if (err)
2926 goto err_port_admin_status_set;
2927
2928 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2929 if (err) {
2930 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2931 mlxsw_sp_port->local_port);
2932 goto err_port_buffers_init;
2933 }
2934
Ido Schimmel90183b92016-04-06 17:10:08 +02002935 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2936 if (err) {
2937 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2938 mlxsw_sp_port->local_port);
2939 goto err_port_ets_init;
2940 }
2941
Ido Schimmelf00817d2016-04-06 17:10:09 +02002942 /* ETS and buffers must be initialized before DCB. */
2943 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2944 if (err) {
2945 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2946 mlxsw_sp_port->local_port);
2947 goto err_port_dcb_init;
2948 }
2949
Ido Schimmela1107482017-05-26 08:37:39 +02002950 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
Ido Schimmel45a4a162017-05-16 19:38:35 +02002951 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02002952 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
Ido Schimmel45a4a162017-05-16 19:38:35 +02002953 mlxsw_sp_port->local_port);
Ido Schimmela1107482017-05-26 08:37:39 +02002954 goto err_port_fids_init;
Ido Schimmel45a4a162017-05-16 19:38:35 +02002955 }
2956
Ido Schimmelc57529e2017-05-26 08:37:31 +02002957 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2958 if (IS_ERR(mlxsw_sp_port_vlan)) {
2959 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
Ido Schimmel05978482016-08-17 16:39:30 +02002960 mlxsw_sp_port->local_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02002961 goto err_port_vlan_get;
Ido Schimmel05978482016-08-17 16:39:30 +02002962 }
2963
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002964 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
Ido Schimmel2f258442016-08-17 16:39:31 +02002965 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002966 err = register_netdev(dev);
2967 if (err) {
2968 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2969 mlxsw_sp_port->local_port);
2970 goto err_register_netdev;
2971 }
2972
Elad Razd808c7e2016-10-28 21:35:57 +02002973 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2974 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2975 module);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002976 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002977 return 0;
2978
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002979err_register_netdev:
Ido Schimmel2f258442016-08-17 16:39:31 +02002980 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002981 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02002982 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2983err_port_vlan_get:
Ido Schimmela1107482017-05-26 08:37:39 +02002984 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
2985err_port_fids_init:
Ido Schimmel4de34eb2016-08-04 17:36:22 +03002986 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002987err_port_dcb_init:
Ido Schimmel90183b92016-04-06 17:10:08 +02002988err_port_ets_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002989err_port_buffers_init:
2990err_port_admin_status_set:
2991err_port_mtu_set:
Ido Schimmel18f1e702016-02-26 17:32:31 +01002992err_port_speed_by_width_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002993err_port_system_port_mapping_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002994err_dev_addr_init:
Ido Schimmel3247ff22016-09-08 08:16:02 +02002995 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2996err_port_swid_set:
Ido Schimmel2e915e02017-06-08 08:47:45 +02002997 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
Ido Schimmel5b153852017-06-08 08:47:44 +02002998err_port_module_map:
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002999 kfree(mlxsw_sp_port->hw_stats.cache);
3000err_alloc_hw_stats:
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003001 kfree(mlxsw_sp_port->sample);
3002err_alloc_sample:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003003 free_percpu(mlxsw_sp_port->pcpu_stats);
3004err_alloc_stats:
3005 free_netdev(dev);
Ido Schimmel5b153852017-06-08 08:47:44 +02003006err_alloc_etherdev:
Jiri Pirko67963a32016-10-28 21:35:55 +02003007 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3008 return err;
3009}
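/* The error labels above unwind strictly in reverse order of
 * initialization; the same teardown sequence is repeated in
 * mlxsw_sp_port_remove() below, so any new init step needs a matching
 * entry in both paths.
 */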
3010
Ido Schimmel5b153852017-06-08 08:47:44 +02003011static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003012{
3013 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3014
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02003015 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
Jiri Pirko67963a32016-10-28 21:35:55 +02003016 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003017 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
Ido Schimmel2f258442016-08-17 16:39:31 +02003018 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02003019 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02003020 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
Ido Schimmela1107482017-05-26 08:37:39 +02003021 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02003022 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmel3e9b27b2016-02-26 17:32:28 +01003023 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
Ido Schimmel2e915e02017-06-08 08:47:45 +02003024 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02003025 kfree(mlxsw_sp_port->hw_stats.cache);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003026 kfree(mlxsw_sp_port->sample);
Yotam Gigi136f1442017-01-09 11:25:47 +01003027 free_percpu(mlxsw_sp_port->pcpu_stats);
Ido Schimmel31a08a52017-05-26 08:37:26 +02003028 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003029 free_netdev(mlxsw_sp_port->dev);
Jiri Pirko67963a32016-10-28 21:35:55 +02003030 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3031}
3032
Jiri Pirkof83e2102016-10-28 21:35:49 +02003033static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3034{
3035 return mlxsw_sp->ports[local_port] != NULL;
3036}
3037
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003038static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3039{
3040 int i;
3041
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003042 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003043 if (mlxsw_sp_port_created(mlxsw_sp, i))
3044 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003045 kfree(mlxsw_sp->port_to_module);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003046 kfree(mlxsw_sp->ports);
3047}
3048
3049static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3050{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003051 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
Ido Schimmeld664b412016-06-09 09:51:40 +02003052 u8 module, width, lane;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003053 size_t alloc_size;
3054 int i;
3055 int err;
3056
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003057 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003058 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3059 if (!mlxsw_sp->ports)
3060 return -ENOMEM;
3061
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003062 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
3063 if (!mlxsw_sp->port_to_module) {
3064 err = -ENOMEM;
3065 goto err_port_to_module_alloc;
3066 }
3067
3068 for (i = 1; i < max_ports; i++) {
Ido Schimmel558c2d52016-02-26 17:32:29 +01003069 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
Ido Schimmeld664b412016-06-09 09:51:40 +02003070 &width, &lane);
Ido Schimmel558c2d52016-02-26 17:32:29 +01003071 if (err)
3072 goto err_port_module_info_get;
3073 if (!width)
3074 continue;
3075 mlxsw_sp->port_to_module[i] = module;
Jiri Pirko67963a32016-10-28 21:35:55 +02003076 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3077 module, width, lane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003078 if (err)
3079 goto err_port_create;
3080 }
3081 return 0;
3082
3083err_port_create:
Ido Schimmel558c2d52016-02-26 17:32:29 +01003084err_port_module_info_get:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003085 for (i--; i >= 1; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003086 if (mlxsw_sp_port_created(mlxsw_sp, i))
3087 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003088 kfree(mlxsw_sp->port_to_module);
3089err_port_to_module_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003090 kfree(mlxsw_sp->ports);
3091 return err;
3092}
3093
Ido Schimmel18f1e702016-02-26 17:32:31 +01003094static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3095{
3096 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3097
3098 return local_port - offset;
3099}
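/* Worked example, assuming MLXSW_SP_PORTS_PER_CLUSTER_MAX is 4: for
 * local_port 7, offset = (7 - 1) % 4 = 2, so the cluster base port is
 * 7 - 2 = 5 and local ports 5..8 form one split-capable cluster.
 */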
3100
Ido Schimmelbe945352016-06-09 09:51:39 +02003101static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3102 u8 module, unsigned int count)
3103{
3104 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3105 int err, i;
3106
3107 for (i = 0; i < count; i++) {
Ido Schimmelbe945352016-06-09 09:51:39 +02003108 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
Ido Schimmeld664b412016-06-09 09:51:40 +02003109 module, width, i * width);
Ido Schimmelbe945352016-06-09 09:51:39 +02003110 if (err)
3111 goto err_port_create;
3112 }
3113
3114 return 0;
3115
3116err_port_create:
3117 for (i--; i >= 0; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003118 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3119 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmelbe945352016-06-09 09:51:39 +02003120 return err;
3121}
3122
3123static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3124 u8 base_port, unsigned int count)
3125{
3126 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3127 int i;
3128
3129 /* A previous split by four means we need to re-create two ports;
3130 * otherwise, only one.
3131 */
3132 count = count / 2;
3133
3134 for (i = 0; i < count; i++) {
3135 local_port = base_port + i * 2;
3136 module = mlxsw_sp->port_to_module[local_port];
3137
Ido Schimmelbe945352016-06-09 09:51:39 +02003138 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
Ido Schimmeld664b412016-06-09 09:51:40 +02003139 width, 0);
Ido Schimmelbe945352016-06-09 09:51:39 +02003140 }
3141}
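/* Example: undoing a split by four re-creates count / 2 = 2 full-width
 * ports, one on every second local port starting at base_port; undoing a
 * split by two re-creates only the single original port.
 */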
3142
Jiri Pirkob2f10572016-04-08 19:11:23 +02003143static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3144 unsigned int count)
Ido Schimmel18f1e702016-02-26 17:32:31 +01003145{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003146 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003147 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003148 u8 module, cur_width, base_port;
3149 int i;
3150 int err;
3151
3152 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3153 if (!mlxsw_sp_port) {
3154 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3155 local_port);
3156 return -EINVAL;
3157 }
3158
Ido Schimmeld664b412016-06-09 09:51:40 +02003159 module = mlxsw_sp_port->mapping.module;
3160 cur_width = mlxsw_sp_port->mapping.width;
3161
Ido Schimmel18f1e702016-02-26 17:32:31 +01003162 if (count != 2 && count != 4) {
3163 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3164 return -EINVAL;
3165 }
3166
Ido Schimmel18f1e702016-02-26 17:32:31 +01003167 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3168 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3169 return -EINVAL;
3170 }
3171
3172 /* Make sure the secondary (even) local ports needed for the split are free. */
3173 if (count == 2) {
3174 base_port = local_port;
3175 if (mlxsw_sp->ports[base_port + 1]) {
3176 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3177 return -EINVAL;
3178 }
3179 } else {
3180 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3181 if (mlxsw_sp->ports[base_port + 1] ||
3182 mlxsw_sp->ports[base_port + 3]) {
3183 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3184 return -EINVAL;
3185 }
3186 }
3187
3188 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003189 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3190 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003191
Ido Schimmelbe945352016-06-09 09:51:39 +02003192 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
3193 if (err) {
3194 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3195 goto err_port_split_create;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003196 }
3197
3198 return 0;
3199
Ido Schimmelbe945352016-06-09 09:51:39 +02003200err_port_split_create:
3201 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003202 return err;
3203}
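/* mlxsw_sp_port_split() is wired up as the .port_split callback in
 * mlxsw_sp_driver further down and is normally reached from user space
 * through devlink, roughly:
 *
 *   devlink port split pci/0000:03:00.0/1 count 4
 *
 * where the device handle depends on how the ASIC is enumerated.
 */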
3204
Jiri Pirkob2f10572016-04-08 19:11:23 +02003205static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
Ido Schimmel18f1e702016-02-26 17:32:31 +01003206{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003207 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003208 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmeld664b412016-06-09 09:51:40 +02003209 u8 cur_width, base_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003210 unsigned int count;
3211 int i;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003212
3213 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3214 if (!mlxsw_sp_port) {
3215 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3216 local_port);
3217 return -EINVAL;
3218 }
3219
3220 if (!mlxsw_sp_port->split) {
3221 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
3222 return -EINVAL;
3223 }
3224
Ido Schimmeld664b412016-06-09 09:51:40 +02003225 cur_width = mlxsw_sp_port->mapping.width;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003226 count = cur_width == 1 ? 4 : 2;
3227
3228 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3229
3230 /* Determine which ports to remove. */
3231 if (count == 2 && local_port >= base_port + 2)
3232 base_port = base_port + 2;
3233
3234 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003235 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3236 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003237
Ido Schimmelbe945352016-06-09 09:51:39 +02003238 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003239
3240 return 0;
3241}
3242
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003243static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3244 char *pude_pl, void *priv)
3245{
3246 struct mlxsw_sp *mlxsw_sp = priv;
3247 struct mlxsw_sp_port *mlxsw_sp_port;
3248 enum mlxsw_reg_pude_oper_status status;
3249 u8 local_port;
3250
3251 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3252 mlxsw_sp_port = mlxsw_sp->ports[local_port];
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003253 if (!mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003254 return;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003255
3256 status = mlxsw_reg_pude_oper_status_get(pude_pl);
3257 if (status == MLXSW_PORT_OPER_STATUS_UP) {
3258 netdev_info(mlxsw_sp_port->dev, "link up\n");
3259 netif_carrier_on(mlxsw_sp_port->dev);
3260 } else {
3261 netdev_info(mlxsw_sp_port->dev, "link down\n");
3262 netif_carrier_off(mlxsw_sp_port->dev);
3263 }
3264}
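/* PUDE (port up/down event) traps from the device are translated here
 * into carrier state changes on the matching netdev, so link flaps are
 * visible to the stack just like on an ordinary NIC.
 */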
3265
Nogah Frankel14eeda92016-11-25 10:33:32 +01003266static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3267 u8 local_port, void *priv)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003268{
3269 struct mlxsw_sp *mlxsw_sp = priv;
3270 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3271 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3272
3273 if (unlikely(!mlxsw_sp_port)) {
3274 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3275 local_port);
3276 return;
3277 }
3278
3279 skb->dev = mlxsw_sp_port->dev;
3280
3281 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3282 u64_stats_update_begin(&pcpu_stats->syncp);
3283 pcpu_stats->rx_packets++;
3284 pcpu_stats->rx_bytes += skb->len;
3285 u64_stats_update_end(&pcpu_stats->syncp);
3286
3287 skb->protocol = eth_type_trans(skb, skb->dev);
3288 netif_receive_skb(skb);
3289}
3290
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02003291static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3292 void *priv)
3293{
3294 skb->offload_fwd_mark = 1;
Nogah Frankel14eeda92016-11-25 10:33:32 +01003295 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02003296}
3297
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003298static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3299 void *priv)
3300{
3301 struct mlxsw_sp *mlxsw_sp = priv;
3302 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3303 struct psample_group *psample_group;
3304 u32 size;
3305
3306 if (unlikely(!mlxsw_sp_port)) {
3307 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3308 local_port);
3309 goto out;
3310 }
3311 if (unlikely(!mlxsw_sp_port->sample)) {
3312 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3313 local_port);
3314 goto out;
3315 }
3316
3317 size = mlxsw_sp_port->sample->truncate ?
3318 mlxsw_sp_port->sample->trunc_size : skb->len;
3319
3320 rcu_read_lock();
3321 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
3322 if (!psample_group)
3323 goto out_unlock;
3324 psample_sample_packet(psample_group, skb, size,
3325 mlxsw_sp_port->dev->ifindex, 0,
3326 mlxsw_sp_port->sample->rate);
3327out_unlock:
3328 rcu_read_unlock();
3329out:
3330 consume_skb(skb);
3331}
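/* Sampled packets are handed to the psample module, which delivers them
 * over generic netlink to user-space listeners (for example those set up
 * for the tc "sample" action); the skb itself is always consumed here.
 */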
3332
Nogah Frankel117b0da2016-11-25 10:33:44 +01003333#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel0fb78a42016-11-25 10:33:39 +01003334 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01003335 _is_ctrl, SP_##_trap_group, DISCARD)
Ido Schimmel93393b32016-08-25 18:42:38 +02003336
Nogah Frankel117b0da2016-11-25 10:33:44 +01003337#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel14eeda92016-11-25 10:33:32 +01003338 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01003339 _is_ctrl, SP_##_trap_group, DISCARD)
3340
3341#define MLXSW_SP_EVENTL(_func, _trap_id) \
3342 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
Nogah Frankel14eeda92016-11-25 10:33:32 +01003343
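/* For example, the STP entry below expands to
 * MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, STP, TRAP_TO_CPU, true,
 *           SP_STP, DISCARD),
 * i.e. STP control packets are trapped to the CPU in the SP_STP trap
 * group without setting skb->offload_fwd_mark.
 */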
Nogah Frankel45449132016-11-25 10:33:35 +01003344static const struct mlxsw_listener mlxsw_sp_listener[] = {
3345 /* Events */
Nogah Frankel117b0da2016-11-25 10:33:44 +01003346 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
Nogah Frankelee4a60d2016-11-25 10:33:29 +01003347 /* L2 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01003348 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
3349 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
3350 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
3351 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
3352 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
3353 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
3354 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
3355 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
3356 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
3357 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
3358 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
Jiri Pirko9d41acc2017-04-18 16:55:38 +02003359 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
Arkadi Sharshevsky588823f2017-07-17 14:15:31 +02003360 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
3361 false),
3362 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3363 false),
3364 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
3365 false),
3366 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3367 false),
Ido Schimmel93393b32016-08-25 18:42:38 +02003368 /* L3 traps */
Ido Schimmel0fcc4842017-07-17 14:15:29 +02003369 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3370 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3371 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
Ido Schimmel0fcc4842017-07-17 14:15:29 +02003372 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003373 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
3374 false),
3375 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
3376 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
3377 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
3378 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
3379 false),
3380 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
3381 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
3382 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
Ido Schimmel0fcc4842017-07-17 14:15:29 +02003383 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003384 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
3385 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
3386 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3387 false),
3388 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3389 false),
3390 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3391 false),
3392 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3393 false),
3394 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
3395 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
3396 false),
3397 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
3398 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
Ido Schimmel7607dd32017-07-17 14:15:30 +02003399 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003400 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003401 /* PKT Sample trap */
3402 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
Jiri Pirko0db7b382017-06-06 14:12:05 +02003403 false, SP_IP2ME, DISCARD),
3404 /* ACL trap */
3405 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003406};
3407
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003408static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3409{
3410 char qpcr_pl[MLXSW_REG_QPCR_LEN];
3411 enum mlxsw_reg_qpcr_ir_units ir_units;
3412 int max_cpu_policers;
3413 bool is_bytes;
3414 u8 burst_size;
3415 u32 rate;
3416 int i, err;
3417
3418 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3419 return -EIO;
3420
3421 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3422
3423 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3424 for (i = 0; i < max_cpu_policers; i++) {
3425 is_bytes = false;
3426 switch (i) {
3427 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3428 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3429 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3430 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3431 rate = 128;
3432 burst_size = 7;
3433 break;
3434 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
Arkadi Sharshevsky588823f2017-07-17 14:15:31 +02003435 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003436 rate = 16 * 1024;
3437 burst_size = 10;
3438 break;
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003439 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003440 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3441 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003442 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003443 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3444 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003445 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003446 rate = 1024;
3447 burst_size = 7;
3448 break;
3449 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3450 is_bytes = true;
3451 rate = 4 * 1024;
3452 burst_size = 4;
3453 break;
3454 default:
3455 continue;
3456 }
3457
3458 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3459 burst_size);
3460 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3461 if (err)
3462 return err;
3463 }
3464
3465 return 0;
3466}
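/* Each CPU trap group gets its own policer, indexed by the group id:
 * control groups such as STP/LACP/LLDP/OSPF are limited to a low packet
 * rate, while IP2ME is policed in bytes (is_bytes = true). Groups not
 * listed in the switch statement are left with their default settings.
 */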
3467
Nogah Frankel579c82e2016-11-25 10:33:42 +01003468static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003469{
3470 char htgt_pl[MLXSW_REG_HTGT_LEN];
Nogah Frankel117b0da2016-11-25 10:33:44 +01003471 enum mlxsw_reg_htgt_trap_group i;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003472 int max_cpu_policers;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003473 int max_trap_groups;
3474 u8 priority, tc;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003475 u16 policer_id;
Nogah Frankel117b0da2016-11-25 10:33:44 +01003476 int err;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003477
3478 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3479 return -EIO;
3480
3481 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003482 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003483
3484 for (i = 0; i < max_trap_groups; i++) {
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003485 policer_id = i;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003486 switch (i) {
Nogah Frankel117b0da2016-11-25 10:33:44 +01003487 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3488 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3489 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3490 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3491 priority = 5;
3492 tc = 5;
3493 break;
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003494 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003495 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3496 priority = 4;
3497 tc = 4;
3498 break;
3499 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3500 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
Arkadi Sharshevsky588823f2017-07-17 14:15:31 +02003501 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003502 priority = 3;
3503 tc = 3;
3504 break;
3505 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003506 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003507 priority = 2;
3508 tc = 2;
3509 break;
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003510 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003511 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3512 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3513 priority = 1;
3514 tc = 1;
3515 break;
3516 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
Nogah Frankel579c82e2016-11-25 10:33:42 +01003517 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3518 tc = MLXSW_REG_HTGT_DEFAULT_TC;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003519 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003520 break;
3521 default:
3522 continue;
3523 }
Nogah Frankel117b0da2016-11-25 10:33:44 +01003524
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003525 if (max_cpu_policers <= policer_id &&
3526 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3527 return -EIO;
3528
3529 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003530 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3531 if (err)
3532 return err;
3533 }
3534
3535 return 0;
3536}
3537
3538static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3539{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003540 int i;
3541 int err;
3542
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003543 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3544 if (err)
3545 return err;
3546
Nogah Frankel579c82e2016-11-25 10:33:42 +01003547 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003548 if (err)
3549 return err;
3550
Nogah Frankel45449132016-11-25 10:33:35 +01003551 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003552 err = mlxsw_core_trap_register(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003553 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003554 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003555 if (err)
Nogah Frankel45449132016-11-25 10:33:35 +01003556 goto err_listener_register;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003557
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003558 }
3559 return 0;
3560
Nogah Frankel45449132016-11-25 10:33:35 +01003561err_listener_register:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003562 for (i--; i >= 0; i--) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003563 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003564 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003565 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003566 }
3567 return err;
3568}
3569
3570static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3571{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003572 int i;
3573
Nogah Frankel45449132016-11-25 10:33:35 +01003574 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003575 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003576 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003577 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003578 }
3579}
3580
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003581static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3582{
3583 char slcr_pl[MLXSW_REG_SLCR_LEN];
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003584 int err;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003585
3586 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3587 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3588 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3589 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3590 MLXSW_REG_SLCR_LAG_HASH_SIP |
3591 MLXSW_REG_SLCR_LAG_HASH_DIP |
3592 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3593 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3594 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003595 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3596 if (err)
3597 return err;
3598
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003599 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3600 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003601 return -EIO;
3602
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003603 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003604 sizeof(struct mlxsw_sp_upper),
3605 GFP_KERNEL);
3606 if (!mlxsw_sp->lags)
3607 return -ENOMEM;
3608
3609 return 0;
3610}
3611
3612static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3613{
3614 kfree(mlxsw_sp->lags);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003615}
3616
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003617static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3618{
3619 char htgt_pl[MLXSW_REG_HTGT_LEN];
3620
Nogah Frankel579c82e2016-11-25 10:33:42 +01003621 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3622 MLXSW_REG_HTGT_INVALID_POLICER,
3623 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3624 MLXSW_REG_HTGT_DEFAULT_TC);
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003625 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3626}
3627
Jiri Pirkob2f10572016-04-08 19:11:23 +02003628static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003629 const struct mlxsw_bus_info *mlxsw_bus_info)
3630{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003631 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003632 int err;
3633
3634 mlxsw_sp->core = mlxsw_core;
3635 mlxsw_sp->bus_info = mlxsw_bus_info;
3636
Yotam Gigi6b742192017-05-23 21:56:29 +02003637 err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
3638 if (err) {
3639 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
3640 return err;
3641 }
3642
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003643 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3644 if (err) {
3645 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3646 return err;
3647 }
3648
Ido Schimmela1107482017-05-26 08:37:39 +02003649 err = mlxsw_sp_fids_init(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003650 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02003651 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
Nogah Frankel45449132016-11-25 10:33:35 +01003652 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003653 }
3654
Ido Schimmela1107482017-05-26 08:37:39 +02003655 err = mlxsw_sp_traps_init(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003656 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02003657 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3658 goto err_traps_init;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003659 }
3660
3661 err = mlxsw_sp_buffers_init(mlxsw_sp);
3662 if (err) {
3663 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3664 goto err_buffers_init;
3665 }
3666
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003667 err = mlxsw_sp_lag_init(mlxsw_sp);
3668 if (err) {
3669 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3670 goto err_lag_init;
3671 }
3672
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003673 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3674 if (err) {
3675 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3676 goto err_switchdev_init;
3677 }
3678
Ido Schimmel464dce12016-07-02 11:00:15 +02003679 err = mlxsw_sp_router_init(mlxsw_sp);
3680 if (err) {
3681 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3682 goto err_router_init;
3683 }
3684
Yotam Gigi763b4b72016-07-21 12:03:17 +02003685 err = mlxsw_sp_span_init(mlxsw_sp);
3686 if (err) {
3687 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3688 goto err_span_init;
3689 }
3690
Jiri Pirko22a67762017-02-03 10:29:07 +01003691 err = mlxsw_sp_acl_init(mlxsw_sp);
3692 if (err) {
3693 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3694 goto err_acl_init;
3695 }
3696
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003697 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3698 if (err) {
3699 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3700 goto err_counter_pool_init;
3701 }
3702
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003703 err = mlxsw_sp_dpipe_init(mlxsw_sp);
3704 if (err) {
3705 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3706 goto err_dpipe_init;
3707 }
3708
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003709 err = mlxsw_sp_ports_create(mlxsw_sp);
3710 if (err) {
3711 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3712 goto err_ports_create;
3713 }
3714
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003715 return 0;
3716
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003717err_ports_create:
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003718 mlxsw_sp_dpipe_fini(mlxsw_sp);
3719err_dpipe_init:
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003720 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3721err_counter_pool_init:
Jiri Pirko22a67762017-02-03 10:29:07 +01003722 mlxsw_sp_acl_fini(mlxsw_sp);
3723err_acl_init:
Yotam Gigi763b4b72016-07-21 12:03:17 +02003724 mlxsw_sp_span_fini(mlxsw_sp);
3725err_span_init:
Ido Schimmel464dce12016-07-02 11:00:15 +02003726 mlxsw_sp_router_fini(mlxsw_sp);
3727err_router_init:
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003728 mlxsw_sp_switchdev_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003729err_switchdev_init:
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003730 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003731err_lag_init:
Jiri Pirko0f433fa2016-04-14 18:19:24 +02003732 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003733err_buffers_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003734 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmela1107482017-05-26 08:37:39 +02003735err_traps_init:
3736 mlxsw_sp_fids_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003737 return err;
3738}
3739
Jiri Pirkob2f10572016-04-08 19:11:23 +02003740static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003741{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003742 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003743
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003744 mlxsw_sp_ports_remove(mlxsw_sp);
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003745 mlxsw_sp_dpipe_fini(mlxsw_sp);
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003746 mlxsw_sp_counter_pool_fini(mlxsw_sp);
Jiri Pirko22a67762017-02-03 10:29:07 +01003747 mlxsw_sp_acl_fini(mlxsw_sp);
Yotam Gigi763b4b72016-07-21 12:03:17 +02003748 mlxsw_sp_span_fini(mlxsw_sp);
Ido Schimmel464dce12016-07-02 11:00:15 +02003749 mlxsw_sp_router_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003750 mlxsw_sp_switchdev_fini(mlxsw_sp);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003751 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko5113bfd2016-05-06 22:20:59 +02003752 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003753 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmela1107482017-05-26 08:37:39 +02003754 mlxsw_sp_fids_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003755}
3756
Bhumika Goyal159fe882017-08-11 19:10:42 +05303757static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003758 .used_max_vepa_channels = 1,
3759 .max_vepa_channels = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003760 .used_max_mid = 1,
Elad Raz53ae6282016-01-10 21:06:26 +01003761 .max_mid = MLXSW_SP_MID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003762 .used_max_pgt = 1,
3763 .max_pgt = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003764 .used_flood_tables = 1,
3765 .used_flood_mode = 1,
3766 .flood_mode = 3,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003767 .max_fid_offset_flood_tables = 3,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003768 .fid_offset_flood_table_size = VLAN_N_VID - 1,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003769 .max_fid_flood_tables = 3,
Ido Schimmela1107482017-05-26 08:37:39 +02003770 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003771 .used_max_ib_mc = 1,
3772 .max_ib_mc = 0,
3773 .used_max_pkey = 1,
3774 .max_pkey = 0,
Nogah Frankel403547d2016-09-20 11:16:52 +02003775 .used_kvd_split_data = 1,
3776 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3777 .kvd_hash_single_parts = 2,
3778 .kvd_hash_double_parts = 1,
Jiri Pirkoc6022422016-07-05 11:27:46 +02003779 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003780 .swid_config = {
3781 {
3782 .used_type = 1,
3783 .type = MLXSW_PORT_SWID_TYPE_ETH,
3784 }
3785 },
Nogah Frankel57d316b2016-07-21 12:03:09 +02003786 .resource_query_enable = 1,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003787};
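/* The profile above is passed to the core through the .profile pointer in
 * mlxsw_sp_driver below and is applied when the device is initialized,
 * sizing shared resources such as the flood tables and the KVD hash and
 * linear regions.
 */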
3788
3789static struct mlxsw_driver mlxsw_sp_driver = {
Jiri Pirko1d20d232016-10-27 15:12:59 +02003790 .kind = mlxsw_sp_driver_name,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003791 .priv_size = sizeof(struct mlxsw_sp),
3792 .init = mlxsw_sp_init,
3793 .fini = mlxsw_sp_fini,
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003794 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003795 .port_split = mlxsw_sp_port_split,
3796 .port_unsplit = mlxsw_sp_port_unsplit,
3797 .sb_pool_get = mlxsw_sp_sb_pool_get,
3798 .sb_pool_set = mlxsw_sp_sb_pool_set,
3799 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3800 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3801 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3802 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3803 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3804 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3805 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3806 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3807 .txhdr_construct = mlxsw_sp_txhdr_construct,
3808 .txhdr_len = MLXSW_TXHDR_LEN,
3809 .profile = &mlxsw_sp_config_profile,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003810};
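/* mlxsw_sp_driver is expected to be registered with mlxsw_core from the
 * module init code (not shown here); once registered, the callbacks above
 * are invoked for every probed Spectrum device.
 */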
3811
Jiri Pirko22a67762017-02-03 10:29:07 +01003812bool mlxsw_sp_port_dev_check(const struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003813{
3814 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3815}
3816
Jiri Pirko1182e532017-03-06 21:25:20 +01003817static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
David Aherndd823642016-10-17 19:15:49 -07003818{
Jiri Pirko1182e532017-03-06 21:25:20 +01003819 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
David Aherndd823642016-10-17 19:15:49 -07003820 int ret = 0;
3821
3822 if (mlxsw_sp_port_dev_check(lower_dev)) {
Jiri Pirko1182e532017-03-06 21:25:20 +01003823 *p_mlxsw_sp_port = netdev_priv(lower_dev);
David Aherndd823642016-10-17 19:15:49 -07003824 ret = 1;
3825 }
3826
3827 return ret;
3828}
3829
Ido Schimmelc57529e2017-05-26 08:37:31 +02003830struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003831{
Jiri Pirko1182e532017-03-06 21:25:20 +01003832 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003833
3834 if (mlxsw_sp_port_dev_check(dev))
3835 return netdev_priv(dev);
3836
Jiri Pirko1182e532017-03-06 21:25:20 +01003837 mlxsw_sp_port = NULL;
3838 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003839
Jiri Pirko1182e532017-03-06 21:25:20 +01003840 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003841}
3842
Ido Schimmel4724ba562017-03-10 08:53:39 +01003843struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003844{
3845 struct mlxsw_sp_port *mlxsw_sp_port;
3846
3847 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3848 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3849}
3850
Arkadi Sharshevskyaf0613782017-06-08 08:44:20 +02003851struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003852{
Jiri Pirko1182e532017-03-06 21:25:20 +01003853 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003854
3855 if (mlxsw_sp_port_dev_check(dev))
3856 return netdev_priv(dev);
3857
Jiri Pirko1182e532017-03-06 21:25:20 +01003858 mlxsw_sp_port = NULL;
3859 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3860 &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003861
Jiri Pirko1182e532017-03-06 21:25:20 +01003862 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003863}
3864
3865struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3866{
3867 struct mlxsw_sp_port *mlxsw_sp_port;
3868
3869 rcu_read_lock();
3870 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3871 if (mlxsw_sp_port)
3872 dev_hold(mlxsw_sp_port->dev);
3873 rcu_read_unlock();
3874 return mlxsw_sp_port;
3875}
3876
3877void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3878{
3879 dev_put(mlxsw_sp_port->dev);
3880}
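/* mlxsw_sp_port_lower_dev_hold() takes a reference on the underlying
 * netdevice, so every successful lookup must be balanced by a call to
 * mlxsw_sp_port_dev_put() once the caller is done with the port.
 */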
3881
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003882static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003883{
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003884 char sldr_pl[MLXSW_REG_SLDR_LEN];
3885
3886 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3887 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3888}
3889
3890static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3891{
3892 char sldr_pl[MLXSW_REG_SLDR_LEN];
3893
3894 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3895 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3896}
3897
3898static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3899 u16 lag_id, u8 port_index)
3900{
3901 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3902 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3903
3904 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3905 lag_id, port_index);
3906 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3907}
3908
3909static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3910 u16 lag_id)
3911{
3912 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3913 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3914
3915 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3916 lag_id);
3917 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3918}
3919
3920static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3921 u16 lag_id)
3922{
3923 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3924 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3925
3926 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3927 lag_id);
3928 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3929}
3930
3931static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3932 u16 lag_id)
3933{
3934 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3935 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3936
3937 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3938 lag_id);
3939 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3940}
3941
3942static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3943 struct net_device *lag_dev,
3944 u16 *p_lag_id)
3945{
3946 struct mlxsw_sp_upper *lag;
3947 int free_lag_id = -1;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003948 u64 max_lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003949 int i;
3950
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003951 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3952 for (i = 0; i < max_lag; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003953 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3954 if (lag->ref_count) {
3955 if (lag->dev == lag_dev) {
3956 *p_lag_id = i;
3957 return 0;
3958 }
3959 } else if (free_lag_id < 0) {
3960 free_lag_id = i;
3961 }
3962 }
3963 if (free_lag_id < 0)
3964 return -EBUSY;
3965 *p_lag_id = free_lag_id;
3966 return 0;
3967}
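/* mlxsw_sp_lag_index_get() returns the LAG id already bound to lag_dev if
 * one exists, otherwise the first free id; -EBUSY means all MAX_LAG
 * hardware LAG slots are taken by other upper devices.
 */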
3968
3969static bool
3970mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3971 struct net_device *lag_dev,
3972 struct netdev_lag_upper_info *lag_upper_info)
3973{
3974 u16 lag_id;
3975
3976 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3977 return false;
3978 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3979 return false;
3980 return true;
3981}
3982
3983static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3984 u16 lag_id, u8 *p_port_index)
3985{
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003986 u64 max_lag_members;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003987 int i;
3988
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003989 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3990 MAX_LAG_MEMBERS);
3991 for (i = 0; i < max_lag_members; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003992 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3993 *p_port_index = i;
3994 return 0;
3995 }
3996 }
3997 return -EBUSY;
3998}
3999
4000static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4001 struct net_device *lag_dev)
4002{
4003 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmelc57529e2017-05-26 08:37:31 +02004004 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004005 struct mlxsw_sp_upper *lag;
4006 u16 lag_id;
4007 u8 port_index;
4008 int err;
4009
4010 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4011 if (err)
4012 return err;
4013 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4014 if (!lag->ref_count) {
4015 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4016 if (err)
4017 return err;
4018 lag->dev = lag_dev;
4019 }
4020
4021 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4022 if (err)
4023 return err;
4024 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4025 if (err)
4026 goto err_col_port_add;
4027 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
4028 if (err)
4029 goto err_col_port_enable;
4030
4031 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4032 mlxsw_sp_port->local_port);
4033 mlxsw_sp_port->lag_id = lag_id;
4034 mlxsw_sp_port->lagged = 1;
4035 lag->ref_count++;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02004036
Ido Schimmelc57529e2017-05-26 08:37:31 +02004037 /* Port is no longer usable as a router interface */
4038 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
4039 if (mlxsw_sp_port_vlan->fid)
Ido Schimmela1107482017-05-26 08:37:39 +02004040 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel86bf95b2016-07-02 11:00:11 +02004041
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004042 return 0;
4043
Ido Schimmel51554db2016-05-06 22:18:39 +02004044err_col_port_enable:
4045 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004046err_col_port_add:
4047 if (!lag->ref_count)
4048 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004049 return err;
4050}
4051
Ido Schimmel82e6db02016-06-20 23:04:04 +02004052static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4053 struct net_device *lag_dev)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004054{
4055 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004056 u16 lag_id = mlxsw_sp_port->lag_id;
Ido Schimmel1c800752016-06-20 23:04:20 +02004057 struct mlxsw_sp_upper *lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004058
4059 if (!mlxsw_sp_port->lagged)
Ido Schimmel82e6db02016-06-20 23:04:04 +02004060 return;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004061 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4062 WARN_ON(lag->ref_count == 0);
4063
Ido Schimmel82e6db02016-06-20 23:04:04 +02004064 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
4065 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004066
Ido Schimmelc57529e2017-05-26 08:37:31 +02004067 /* Any VLANs configured on the port are no longer valid */
4068 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01004069
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02004070 if (lag->ref_count == 1)
Ido Schimmel82e6db02016-06-20 23:04:04 +02004071 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004072
4073 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4074 mlxsw_sp_port->local_port);
4075 mlxsw_sp_port->lagged = 0;
4076 lag->ref_count--;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02004077
Ido Schimmelc57529e2017-05-26 08:37:31 +02004078 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
4079 /* Make sure untagged frames are allowed to ingress */
4080 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004081}
4082
Jiri Pirko74581202015-12-03 12:12:30 +01004083static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4084 u16 lag_id)
4085{
4086 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4087 char sldr_pl[MLXSW_REG_SLDR_LEN];
4088
4089 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4090 mlxsw_sp_port->local_port);
4091 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4092}
4093
4094static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4095 u16 lag_id)
4096{
4097 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4098 char sldr_pl[MLXSW_REG_SLDR_LEN];
4099
4100 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4101 mlxsw_sp_port->local_port);
4102 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4103}
4104
4105static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
4106 bool lag_tx_enabled)
4107{
4108 if (lag_tx_enabled)
4109 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
4110 mlxsw_sp_port->lag_id);
4111 else
4112 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4113 mlxsw_sp_port->lag_id);
4114}
4115
4116static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4117 struct netdev_lag_lower_state_info *info)
4118{
4119 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
4120}
4121
Jiri Pirko2b94e582017-04-18 16:55:37 +02004122static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4123 bool enable)
4124{
4125 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4126 enum mlxsw_reg_spms_state spms_state;
4127 char *spms_pl;
4128 u16 vid;
4129 int err;
4130
4131 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4132 MLXSW_REG_SPMS_STATE_DISCARDING;
4133
4134 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4135 if (!spms_pl)
4136 return -ENOMEM;
4137 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4138
4139 for (vid = 0; vid < VLAN_N_VID; vid++)
4140 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4141
4142 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4143 kfree(spms_pl);
4144 return err;
4145}
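/* mlxsw_sp_port_stp_set() programs the same STP state for every possible
 * VLAN on the port via the SPMS register; the OVS join/leave helpers
 * below use it to move the whole port between forwarding and discarding.
 */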
4146
4147static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4148{
4149 int err;
4150
Ido Schimmel4aafc362017-05-26 08:37:25 +02004151 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004152 if (err)
4153 return err;
Ido Schimmel4aafc362017-05-26 08:37:25 +02004154 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4155 if (err)
4156 goto err_port_stp_set;
Jiri Pirko2b94e582017-04-18 16:55:37 +02004157 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4158 true, false);
4159 if (err)
4160 goto err_port_vlan_set;
4161 return 0;
4162
4163err_port_vlan_set:
4164 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
Ido Schimmel4aafc362017-05-26 08:37:25 +02004165err_port_stp_set:
4166 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004167 return err;
4168}
4169
4170static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4171{
4172 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4173 false, false);
4174 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
Ido Schimmel4aafc362017-05-26 08:37:25 +02004175 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004176}
4177
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}

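/* Propagate changes in the lower state of a LAG member port (e.g. whether
 * the kernel wants it to transmit) to the device, so the hardware LAG stays
 * in sync with the software one.
 */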
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

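/* Dispatch events seen on a switch port netdev to the handlers above.
 * lower_dev is the netdev directly linked under the upper device: the port
 * itself when called for a port, or the LAG when called for a LAG member.
 */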
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

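/* Replicate an event received on a LAG netdev to every mlxsw_sp port
 * enslaved to it, stopping at the first error.
 */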
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

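/* Handle CHANGEUPPER events for a VLAN device configured on top of a switch
 * port. Only enslavement to a bridge master is supported for such uppers.
 */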
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper_dev;
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

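/* Same as above, but for a VLAN device configured on top of a LAG: the event
 * is replicated to every mlxsw_sp port member of the LAG.
 */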
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

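/* Demultiplex events for a VLAN device according to its real device: a
 * single switch port or a LAG of switch ports. VLAN devices on top of
 * anything else are of no interest to the driver.
 */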
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);

	return 0;
}

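/* CHANGEUPPER events whose upper is an L3 master (VRF) device are handled
 * by mlxsw_sp_netdevice_vrf_event() and need to be told apart from the
 * bridge / LAG cases handled above.
 */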
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

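/* Top-level netdevice notifier: route each event to the matching handler
 * based on the kind of device it was generated for, and convert the
 * handler's errno into a notifier return value.
 */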
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

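/* Register the netdev / inetaddr / inet6addr / netevent notifiers first and
 * only then the mlxsw core and PCI drivers; the error path unwinds the
 * registrations in reverse order.
 */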
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

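/* Tear everything down in the opposite order of mlxsw_sp_module_init(). */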
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);