Jiri Pirko56ade8f2015-10-16 14:01:37 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
Jiri Pirko22a67762017-02-03 10:29:07 +01003 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
Jiri Pirko56ade8f2015-10-16 14:01:37 +02005 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/types.h>
Jiri Pirko1d20d232016-10-27 15:12:59 +020040#include <linux/pci.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020041#include <linux/netdevice.h>
42#include <linux/etherdevice.h>
43#include <linux/ethtool.h>
44#include <linux/slab.h>
45#include <linux/device.h>
46#include <linux/skbuff.h>
47#include <linux/if_vlan.h>
48#include <linux/if_bridge.h>
49#include <linux/workqueue.h>
50#include <linux/jiffies.h>
51#include <linux/bitops.h>
Ido Schimmel7f71eb42015-12-15 16:03:37 +010052#include <linux/list.h>
Ido Schimmel80bedf12016-06-20 23:03:59 +020053#include <linux/notifier.h>
Ido Schimmel90183b92016-04-06 17:10:08 +020054#include <linux/dcbnl.h>
Ido Schimmel99724c12016-07-04 08:23:14 +020055#include <linux/inetdevice.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020056#include <net/switchdev.h>
Yotam Gigi763b4b72016-07-21 12:03:17 +020057#include <net/pkt_cls.h>
58#include <net/tc_act/tc_mirred.h>
Jiri Pirkoe7322632016-09-01 10:37:43 +020059#include <net/netevent.h>
Yotam Gigi98d0f7b2017-01-23 11:07:11 +010060#include <net/tc_act/tc_sample.h>
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +020061#include <net/addrconf.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020062
63#include "spectrum.h"
Jiri Pirko1d20d232016-10-27 15:12:59 +020064#include "pci.h"
Jiri Pirko56ade8f2015-10-16 14:01:37 +020065#include "core.h"
66#include "reg.h"
67#include "port.h"
68#include "trap.h"
69#include "txheader.h"
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +010070#include "spectrum_cnt.h"
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +020071#include "spectrum_dpipe.h"
Yotam Gigie5e5c882017-05-23 21:56:27 +020072#include "../mlxfw/mlxfw.h"
Jiri Pirko56ade8f2015-10-16 14:01:37 +020073
Yotam Gigi6b742192017-05-23 21:56:29 +020074#define MLXSW_FWREV_MAJOR 13
75#define MLXSW_FWREV_MINOR 1420
76#define MLXSW_FWREV_SUBMINOR 122
77
78static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
79 .major = MLXSW_FWREV_MAJOR,
80 .minor = MLXSW_FWREV_MINOR,
81 .subminor = MLXSW_FWREV_SUBMINOR
82};
83
84#define MLXSW_SP_FW_FILENAME \
Yotam Gigia4e1ce22017-06-04 16:49:58 +020085 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
Yotam Gigi6b742192017-05-23 21:56:29 +020086 "." __stringify(MLXSW_FWREV_MINOR) \
87 "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
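
/* With the revision numbers above, MLXSW_SP_FW_FILENAME expands to
 * "mellanox/mlxsw_spectrum-13.1420.122.mfa2"; this is the path (relative
 * to the firmware search directories) that request_firmware_direct() is
 * asked for in mlxsw_sp_fw_rev_validate() below.
 */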
88
Jiri Pirko56ade8f2015-10-16 14:01:37 +020089static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
90static const char mlxsw_sp_driver_version[] = "1.0";
91
92/* tx_hdr_version
93 * Tx header version.
94 * Must be set to 1.
95 */
96MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
97
98/* tx_hdr_ctl
99 * Packet control type.
100 * 0 - Ethernet control (e.g. EMADs, LACP)
101 * 1 - Ethernet data
102 */
103MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
104
105/* tx_hdr_proto
106 * Packet protocol type. Must be set to 1 (Ethernet).
107 */
108MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
109
110/* tx_hdr_rx_is_router
111 * Packet is sent from the router. Valid for data packets only.
112 */
113MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
114
115/* tx_hdr_fid_valid
116 * Indicates if the 'fid' field is valid and should be used for
117 * forwarding lookup. Valid for data packets only.
118 */
119MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
120
121/* tx_hdr_swid
122 * Switch partition ID. Must be set to 0.
123 */
124MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
125
126/* tx_hdr_control_tclass
127 * Indicates if the packet should use the control TClass and not one
128 * of the data TClasses.
129 */
130MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
131
132/* tx_hdr_etclass
133 * Egress TClass to be used on the egress device on the egress port.
134 */
135MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
136
137/* tx_hdr_port_mid
138 * Destination local port for unicast packets.
139 * Destination multicast ID for multicast packets.
140 *
141 * Control packets are directed to a specific egress port, while data
142 * packets are transmitted through the CPU port (0) into the switch partition,
143 * where forwarding rules are applied.
144 */
145MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
146
147/* tx_hdr_fid
148 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
149 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
150 * Valid for data packets only.
151 */
152MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
153
154/* tx_hdr_type
155 * 0 - Data packets
156 * 6 - Control packets
157 */
158MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
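
/* Each MLXSW_ITEM32(tx, hdr, <field>, ...) above generates
 * mlxsw_tx_hdr_<field>_set()/_get() accessors for the given offset and
 * bit range of the MLXSW_TXHDR_LEN-sized Tx header;
 * mlxsw_sp_txhdr_construct() below uses the setters to build the header.
 * Illustrative use only:
 *
 *	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
 *	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 */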
159
Yotam Gigie5e5c882017-05-23 21:56:27 +0200160struct mlxsw_sp_mlxfw_dev {
161 struct mlxfw_dev mlxfw_dev;
162 struct mlxsw_sp *mlxsw_sp;
163};
164
165static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
166 u16 component_index, u32 *p_max_size,
167 u8 *p_align_bits, u16 *p_max_write_size)
168{
169 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
170 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
171 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
172 char mcqi_pl[MLXSW_REG_MCQI_LEN];
173 int err;
174
175 mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
176 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
177 if (err)
178 return err;
179 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
180 p_max_write_size);
181
182 *p_align_bits = max_t(u8, *p_align_bits, 2);
183 *p_max_write_size = min_t(u16, *p_max_write_size,
184 MLXSW_REG_MCDA_MAX_DATA_LEN);
185 return 0;
186}
187
188static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
189{
190 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
191 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
192 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
193 char mcc_pl[MLXSW_REG_MCC_LEN];
194 u8 control_state;
195 int err;
196
197 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
198 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
199 if (err)
200 return err;
201
202 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
203 if (control_state != MLXFW_FSM_STATE_IDLE)
204 return -EBUSY;
205
206 mlxsw_reg_mcc_pack(mcc_pl,
207 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
208 0, *fwhandle, 0);
209 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
210}
211
212static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
213 u32 fwhandle, u16 component_index,
214 u32 component_size)
215{
216 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
217 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
218 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
219 char mcc_pl[MLXSW_REG_MCC_LEN];
220
221 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
222 component_index, fwhandle, component_size);
223 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
224}
225
226static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
227 u32 fwhandle, u8 *data, u16 size,
228 u32 offset)
229{
230 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
231 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
232 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
233 char mcda_pl[MLXSW_REG_MCDA_LEN];
234
235 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
236 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
237}
238
239static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
240 u32 fwhandle, u16 component_index)
241{
242 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
243 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
244 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
245 char mcc_pl[MLXSW_REG_MCC_LEN];
246
247 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
248 component_index, fwhandle, 0);
249 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
250}
251
252static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
253{
254 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
255 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
256 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
257 char mcc_pl[MLXSW_REG_MCC_LEN];
258
259 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
260 fwhandle, 0);
261 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
262}
263
264static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
265 enum mlxfw_fsm_state *fsm_state,
266 enum mlxfw_fsm_state_err *fsm_state_err)
267{
268 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
269 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
270 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
271 char mcc_pl[MLXSW_REG_MCC_LEN];
272 u8 control_state;
273 u8 error_code;
274 int err;
275
276 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
277 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
278 if (err)
279 return err;
280
281 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
282 *fsm_state = control_state;
283 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
284 MLXFW_FSM_STATE_ERR_MAX);
285 return 0;
286}
287
288static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
289{
290 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
291 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
292 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
293 char mcc_pl[MLXSW_REG_MCC_LEN];
294
295 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
296 fwhandle, 0);
297 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
298}
299
300static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
301{
302 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
303 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
304 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
305 char mcc_pl[MLXSW_REG_MCC_LEN];
306
307 mlxsw_reg_mcc_pack(mcc_pl,
308 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
309 fwhandle, 0);
310 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
311}
312
313static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
314 .component_query = mlxsw_sp_component_query,
315 .fsm_lock = mlxsw_sp_fsm_lock,
316 .fsm_component_update = mlxsw_sp_fsm_component_update,
317 .fsm_block_download = mlxsw_sp_fsm_block_download,
318 .fsm_component_verify = mlxsw_sp_fsm_component_verify,
319 .fsm_activate = mlxsw_sp_fsm_activate,
320 .fsm_query_state = mlxsw_sp_fsm_query_state,
321 .fsm_cancel = mlxsw_sp_fsm_cancel,
322 .fsm_release = mlxsw_sp_fsm_release
323};
324
Yotam Gigice6ef68f2017-06-01 16:26:46 +0300325static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
326 const struct firmware *firmware)
327{
328 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
329 .mlxfw_dev = {
330 .ops = &mlxsw_sp_mlxfw_dev_ops,
331 .psid = mlxsw_sp->bus_info->psid,
332 .psid_size = strlen(mlxsw_sp->bus_info->psid),
333 },
334 .mlxsw_sp = mlxsw_sp
335 };
336
337 return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
338}
339
Yotam Gigi6b742192017-05-23 21:56:29 +0200340static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
341 const struct mlxsw_fw_rev *b)
342{
343 if (a->major != b->major)
344 return a->major > b->major;
345 if (a->minor != b->minor)
346 return a->minor > b->minor;
347 return a->subminor >= b->subminor;
348}
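
/* Illustrative orderings implemented by mlxsw_sp_fw_rev_ge():
 *	13.1500.0   vs. 13.1420.122 -> true  (minor decides when majors match)
 *	13.1420.122 vs. 13.1420.122 -> true  (an equal revision is accepted)
 *	12.2000.0   vs. 13.1420.122 -> false (major is compared first)
 */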
349
350static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
351{
352 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
Yotam Gigi6b742192017-05-23 21:56:29 +0200353 const struct firmware *firmware;
354 int err;
355
356 if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
357 return 0;
358
 359 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
360 rev->major, rev->minor, rev->subminor);
361 dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
362 MLXSW_SP_FW_FILENAME);
363
364 err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
365 mlxsw_sp->bus_info->dev);
366 if (err) {
367 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
368 MLXSW_SP_FW_FILENAME);
369 return err;
370 }
371
Yotam Gigice6ef68f2017-06-01 16:26:46 +0300372 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
Yotam Gigi6b742192017-05-23 21:56:29 +0200373 release_firmware(firmware);
374 return err;
375}
376
Arkadi Sharshevsky1abcbcc2017-03-11 09:42:53 +0100377int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
378 unsigned int counter_index, u64 *packets,
379 u64 *bytes)
380{
381 char mgpc_pl[MLXSW_REG_MGPC_LEN];
382 int err;
383
384 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
385 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
386 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
387 if (err)
388 return err;
389 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
390 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
391 return 0;
392}
393
394static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
395 unsigned int counter_index)
396{
397 char mgpc_pl[MLXSW_REG_MGPC_LEN];
398
399 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
400 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
401 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
402}
403
404int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
405 unsigned int *p_counter_index)
406{
407 int err;
408
409 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
410 p_counter_index);
411 if (err)
412 return err;
413 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
414 if (err)
415 goto err_counter_clear;
416 return 0;
417
418err_counter_clear:
419 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
420 *p_counter_index);
421 return err;
422}
423
424void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
425 unsigned int counter_index)
426{
427 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
428 counter_index);
429}
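
/* A minimal usage sketch (kept out of the build with #if 0) of the flow
 * counter helpers above: allocate a counter, read it, then release it.
 * The caller and the point at which the counter gets bound to an ACL
 * action are hypothetical.
 */
#if 0
static void mlxsw_sp_flow_counter_example(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int counter_index;
	u64 packets, bytes;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index))
		return;
	/* ... bind counter_index to a flow/ACL action and pass traffic ... */
	if (!mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
				       &packets, &bytes))
		pr_debug("flow counter %u: %llu packets, %llu bytes\n",
			 counter_index, packets, bytes);
	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
}
#endif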
430
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200431static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
432 const struct mlxsw_tx_info *tx_info)
433{
434 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
435
436 memset(txhdr, 0, MLXSW_TXHDR_LEN);
437
438 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
439 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
440 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
441 mlxsw_tx_hdr_swid_set(txhdr, 0);
442 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
443 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
444 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
445}
446
Ido Schimmelfe9ccc72017-05-16 19:38:31 +0200447int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
448 u8 state)
449{
450 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
451 enum mlxsw_reg_spms_state spms_state;
452 char *spms_pl;
453 int err;
454
455 switch (state) {
456 case BR_STATE_FORWARDING:
457 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
458 break;
459 case BR_STATE_LEARNING:
460 spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
461 break;
462 case BR_STATE_LISTENING: /* fall-through */
463 case BR_STATE_DISABLED: /* fall-through */
464 case BR_STATE_BLOCKING:
465 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
466 break;
467 default:
468 BUG();
469 }
470
471 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
472 if (!spms_pl)
473 return -ENOMEM;
474 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
475 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
476
477 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
478 kfree(spms_pl);
479 return err;
480}
481
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200482static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
483{
Elad Raz5b090742016-10-28 21:35:46 +0200484 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200485 int err;
486
487 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
488 if (err)
489 return err;
490 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
491 return 0;
492}
493
Yotam Gigi763b4b72016-07-21 12:03:17 +0200494static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
495{
Yotam Gigi763b4b72016-07-21 12:03:17 +0200496 int i;
497
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200498 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
Yotam Gigi763b4b72016-07-21 12:03:17 +0200499 return -EIO;
500
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200501 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
502 MAX_SPAN);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200503 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
504 sizeof(struct mlxsw_sp_span_entry),
505 GFP_KERNEL);
506 if (!mlxsw_sp->span.entries)
507 return -ENOMEM;
508
509 for (i = 0; i < mlxsw_sp->span.entries_count; i++)
510 INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
511
512 return 0;
513}
514
515static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
516{
517 int i;
518
519 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
520 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
521
522 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
523 }
524 kfree(mlxsw_sp->span.entries);
525}
526
527static struct mlxsw_sp_span_entry *
528mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
529{
530 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
531 struct mlxsw_sp_span_entry *span_entry;
532 char mpat_pl[MLXSW_REG_MPAT_LEN];
533 u8 local_port = port->local_port;
534 int index;
535 int i;
536 int err;
537
538 /* find a free entry to use */
539 index = -1;
540 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
541 if (!mlxsw_sp->span.entries[i].used) {
542 index = i;
543 span_entry = &mlxsw_sp->span.entries[i];
544 break;
545 }
546 }
547 if (index < 0)
548 return NULL;
549
 550 /* create a new port analyzer entry for local_port */
551 mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
552 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
553 if (err)
554 return NULL;
555
556 span_entry->used = true;
557 span_entry->id = index;
Yotam Gigi2d644d42016-11-11 16:34:25 +0100558 span_entry->ref_count = 1;
Yotam Gigi763b4b72016-07-21 12:03:17 +0200559 span_entry->local_port = local_port;
560 return span_entry;
561}
562
563static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
564 struct mlxsw_sp_span_entry *span_entry)
565{
566 u8 local_port = span_entry->local_port;
567 char mpat_pl[MLXSW_REG_MPAT_LEN];
568 int pa_id = span_entry->id;
569
570 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
571 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
572 span_entry->used = false;
573}
574
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200575static struct mlxsw_sp_span_entry *
576mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200577{
578 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
579 int i;
580
581 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
582 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
583
584 if (curr->used && curr->local_port == port->local_port)
585 return curr;
586 }
587 return NULL;
588}
589
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200590static struct mlxsw_sp_span_entry
591*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200592{
593 struct mlxsw_sp_span_entry *span_entry;
594
595 span_entry = mlxsw_sp_span_entry_find(port);
596 if (span_entry) {
Yotam Gigi2d644d42016-11-11 16:34:25 +0100597 /* Already exists, just take a reference */
Yotam Gigi763b4b72016-07-21 12:03:17 +0200598 span_entry->ref_count++;
599 return span_entry;
600 }
601
602 return mlxsw_sp_span_entry_create(port);
603}
604
605static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
606 struct mlxsw_sp_span_entry *span_entry)
607{
Yotam Gigi2d644d42016-11-11 16:34:25 +0100608 WARN_ON(!span_entry->ref_count);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200609 if (--span_entry->ref_count == 0)
610 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
611 return 0;
612}
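
/* Reference counting example: two mirror rules towards the same analyzer
 * port share one SPAN entry. The first mlxsw_sp_span_entry_get() creates
 * the entry (ref_count = 1), the second only bumps ref_count; the entry
 * is destroyed once the last mlxsw_sp_span_entry_put() drops it to zero.
 */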
613
614static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
615{
616 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
617 struct mlxsw_sp_span_inspected_port *p;
618 int i;
619
620 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
621 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
622
623 list_for_each_entry(p, &curr->bound_ports_list, list)
624 if (p->local_port == port->local_port &&
625 p->type == MLXSW_SP_SPAN_EGRESS)
626 return true;
627 }
628
629 return false;
630}
631
Ido Schimmel18281f22017-03-24 08:02:51 +0100632static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
633 int mtu)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200634{
Ido Schimmel18281f22017-03-24 08:02:51 +0100635 return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
Yotam Gigi763b4b72016-07-21 12:03:17 +0200636}
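
/* Worked example, assuming an MTU of 1518: the SPAN buffer is sized for
 * 1518 * 5 / 2 = 3795 bytes, converted to cells, plus one extra cell.
 * The resulting cell count depends on the ASIC cell size hidden behind
 * mlxsw_sp_bytes_cells(), so only the byte figure is exact.
 */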
637
638static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
639{
640 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
641 char sbib_pl[MLXSW_REG_SBIB_LEN];
642 int err;
643
644 /* If port is egress mirrored, the shared buffer size should be
645 * updated according to the mtu value
646 */
647 if (mlxsw_sp_span_is_egress_mirror(port)) {
Ido Schimmel18281f22017-03-24 08:02:51 +0100648 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
649
650 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200651 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
652 if (err) {
653 netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
654 return err;
655 }
656 }
657
658 return 0;
659}
660
661static struct mlxsw_sp_span_inspected_port *
662mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
663 struct mlxsw_sp_span_entry *span_entry)
664{
665 struct mlxsw_sp_span_inspected_port *p;
666
667 list_for_each_entry(p, &span_entry->bound_ports_list, list)
668 if (port->local_port == p->local_port)
669 return p;
670 return NULL;
671}
672
673static int
674mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
675 struct mlxsw_sp_span_entry *span_entry,
676 enum mlxsw_sp_span_type type)
677{
678 struct mlxsw_sp_span_inspected_port *inspected_port;
679 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
680 char mpar_pl[MLXSW_REG_MPAR_LEN];
681 char sbib_pl[MLXSW_REG_SBIB_LEN];
682 int pa_id = span_entry->id;
683 int err;
684
685 /* if it is an egress SPAN, bind a shared buffer to it */
686 if (type == MLXSW_SP_SPAN_EGRESS) {
Ido Schimmel18281f22017-03-24 08:02:51 +0100687 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
688 port->dev->mtu);
689
690 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200691 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
692 if (err) {
693 netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
694 return err;
695 }
696 }
697
698 /* bind the port to the SPAN entry */
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200699 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
700 (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200701 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
702 if (err)
703 goto err_mpar_reg_write;
704
705 inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
706 if (!inspected_port) {
707 err = -ENOMEM;
708 goto err_inspected_port_alloc;
709 }
710 inspected_port->local_port = port->local_port;
711 inspected_port->type = type;
712 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
713
714 return 0;
715
716err_mpar_reg_write:
717err_inspected_port_alloc:
718 if (type == MLXSW_SP_SPAN_EGRESS) {
719 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
720 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
721 }
722 return err;
723}
724
725static void
726mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
727 struct mlxsw_sp_span_entry *span_entry,
728 enum mlxsw_sp_span_type type)
729{
730 struct mlxsw_sp_span_inspected_port *inspected_port;
731 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
732 char mpar_pl[MLXSW_REG_MPAR_LEN];
733 char sbib_pl[MLXSW_REG_SBIB_LEN];
734 int pa_id = span_entry->id;
735
736 inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
737 if (!inspected_port)
738 return;
739
740 /* remove the inspected port */
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200741 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
742 (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200743 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
744
745 /* remove the SBIB buffer if it was egress SPAN */
746 if (type == MLXSW_SP_SPAN_EGRESS) {
747 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
748 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
749 }
750
751 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
752
753 list_del(&inspected_port->list);
754 kfree(inspected_port);
755}
756
757static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
758 struct mlxsw_sp_port *to,
759 enum mlxsw_sp_span_type type)
760{
761 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
762 struct mlxsw_sp_span_entry *span_entry;
763 int err;
764
765 span_entry = mlxsw_sp_span_entry_get(to);
766 if (!span_entry)
767 return -ENOENT;
768
769 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
770 span_entry->id);
771
772 err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
773 if (err)
774 goto err_port_bind;
775
776 return 0;
777
778err_port_bind:
779 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
780 return err;
781}
782
783static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
784 struct mlxsw_sp_port *to,
785 enum mlxsw_sp_span_type type)
786{
787 struct mlxsw_sp_span_entry *span_entry;
788
789 span_entry = mlxsw_sp_span_entry_find(to);
790 if (!span_entry) {
 791 netdev_err(from->dev, "No SPAN entry found\n");
792 return;
793 }
794
 795 netdev_dbg(from->dev, "Removing inspected port from SPAN entry %d\n",
796 span_entry->id);
797 mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
798}
799
Yotam Gigi98d0f7b2017-01-23 11:07:11 +0100800static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
801 bool enable, u32 rate)
802{
803 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
804 char mpsc_pl[MLXSW_REG_MPSC_LEN];
805
806 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
807 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
808}
809
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200810static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
811 bool is_up)
812{
813 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
814 char paos_pl[MLXSW_REG_PAOS_LEN];
815
816 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
817 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
818 MLXSW_PORT_ADMIN_STATUS_DOWN);
819 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
820}
821
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200822static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
823 unsigned char *addr)
824{
825 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
826 char ppad_pl[MLXSW_REG_PPAD_LEN];
827
828 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
829 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
830 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
831}
832
833static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
834{
835 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
836 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
837
838 ether_addr_copy(addr, mlxsw_sp->base_mac);
839 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
840 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
841}
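
/* Example of the derivation above (illustrative base MAC): with a switch
 * base MAC of 00:11:22:33:44:00, local port 5 ends up with the address
 * 00:11:22:33:44:05, i.e. the base MAC with the local port number added
 * to the last byte.
 */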
842
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200843static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
844{
845 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
846 char pmtu_pl[MLXSW_REG_PMTU_LEN];
847 int max_mtu;
848 int err;
849
850 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
851 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
852 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
853 if (err)
854 return err;
855 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
856
857 if (mtu > max_mtu)
858 return -EINVAL;
859
860 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
861 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
862}
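
/* Example: for a requested MTU of 9000, the value programmed and checked
 * against the PMTU maximum is 9000 + MLXSW_TXHDR_LEN + ETH_HLEN, since
 * the Tx header and the Ethernet header are not counted in the netdev MTU.
 */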
863
864static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
865{
866 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel5b153852017-06-08 08:47:44 +0200867 char pspa_pl[MLXSW_REG_PSPA_LEN];
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200868
Ido Schimmel5b153852017-06-08 08:47:44 +0200869 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
870 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200871}
872
Ido Schimmela1107482017-05-26 08:37:39 +0200873int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200874{
875 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
876 char svpe_pl[MLXSW_REG_SVPE_LEN];
877
878 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
879 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
880}
881
Ido Schimmel7cbc4272017-05-16 19:38:33 +0200882int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
883 bool learn_enable)
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200884{
885 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
886 char *spvmlr_pl;
887 int err;
888
889 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
890 if (!spvmlr_pl)
891 return -ENOMEM;
Ido Schimmel7cbc4272017-05-16 19:38:33 +0200892 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
893 learn_enable);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200894 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
895 kfree(spvmlr_pl);
896 return err;
897}
898
Ido Schimmelb02eae92017-05-16 19:38:34 +0200899static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
900 u16 vid)
901{
902 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
903 char spvid_pl[MLXSW_REG_SPVID_LEN];
904
905 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
906 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
907}
908
909static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
910 bool allow)
911{
912 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
913 char spaft_pl[MLXSW_REG_SPAFT_LEN];
914
915 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
916 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
917}
918
919int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
920{
921 int err;
922
923 if (!vid) {
924 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
925 if (err)
926 return err;
927 } else {
928 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
929 if (err)
930 return err;
931 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
932 if (err)
933 goto err_port_allow_untagged_set;
934 }
935
936 mlxsw_sp_port->pvid = vid;
937 return 0;
938
939err_port_allow_untagged_set:
940 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
941 return err;
942}
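
/* Usage sketch: mlxsw_sp_port_pvid_set(port, 1) classifies untagged
 * traffic to VLAN 1, while mlxsw_sp_port_pvid_set(port, 0) disallows
 * untagged traffic on the port altogether (no PVID is programmed).
 */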
943
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200944static int
945mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
946{
947 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
948 char sspr_pl[MLXSW_REG_SSPR_LEN];
949
950 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
951 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
952}
953
Ido Schimmeld664b412016-06-09 09:51:40 +0200954static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
955 u8 local_port, u8 *p_module,
956 u8 *p_width, u8 *p_lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200957{
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200958 char pmlp_pl[MLXSW_REG_PMLP_LEN];
959 int err;
960
Ido Schimmel558c2d52016-02-26 17:32:29 +0100961 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200962 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
963 if (err)
964 return err;
Ido Schimmel558c2d52016-02-26 17:32:29 +0100965 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
966 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
Ido Schimmel2bf9a582016-04-05 10:20:04 +0200967 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200968 return 0;
969}
970
Ido Schimmel2e915e02017-06-08 08:47:45 +0200971static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
Ido Schimmel18f1e702016-02-26 17:32:31 +0100972 u8 module, u8 width, u8 lane)
973{
Ido Schimmel2e915e02017-06-08 08:47:45 +0200974 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel18f1e702016-02-26 17:32:31 +0100975 char pmlp_pl[MLXSW_REG_PMLP_LEN];
976 int i;
977
Ido Schimmel2e915e02017-06-08 08:47:45 +0200978 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
Ido Schimmel18f1e702016-02-26 17:32:31 +0100979 mlxsw_reg_pmlp_width_set(pmlp_pl, width);
980 for (i = 0; i < width; i++) {
981 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
982 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
983 }
984
985 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
986}
987
Ido Schimmel2e915e02017-06-08 08:47:45 +0200988static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
Ido Schimmel3e9b27b2016-02-26 17:32:28 +0100989{
Ido Schimmel2e915e02017-06-08 08:47:45 +0200990 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel3e9b27b2016-02-26 17:32:28 +0100991 char pmlp_pl[MLXSW_REG_PMLP_LEN];
992
Ido Schimmel2e915e02017-06-08 08:47:45 +0200993 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
Ido Schimmel3e9b27b2016-02-26 17:32:28 +0100994 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
995 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
996}
997
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200998static int mlxsw_sp_port_open(struct net_device *dev)
999{
1000 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1001 int err;
1002
1003 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1004 if (err)
1005 return err;
1006 netif_start_queue(dev);
1007 return 0;
1008}
1009
1010static int mlxsw_sp_port_stop(struct net_device *dev)
1011{
1012 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1013
1014 netif_stop_queue(dev);
1015 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1016}
1017
1018static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
1019 struct net_device *dev)
1020{
1021 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1022 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1023 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1024 const struct mlxsw_tx_info tx_info = {
1025 .local_port = mlxsw_sp_port->local_port,
1026 .is_emad = false,
1027 };
1028 u64 len;
1029 int err;
1030
Jiri Pirko307c2432016-04-08 19:11:22 +02001031 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001032 return NETDEV_TX_BUSY;
1033
1034 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
1035 struct sk_buff *skb_orig = skb;
1036
1037 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
1038 if (!skb) {
1039 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1040 dev_kfree_skb_any(skb_orig);
1041 return NETDEV_TX_OK;
1042 }
Arkadi Sharshevsky36bf38d2017-01-12 09:10:37 +01001043 dev_consume_skb_any(skb_orig);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001044 }
1045
1046 if (eth_skb_pad(skb)) {
1047 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1048 return NETDEV_TX_OK;
1049 }
1050
1051 mlxsw_sp_txhdr_construct(skb, &tx_info);
Nogah Frankel63dcdd32016-06-17 15:09:05 +02001052 /* TX header is consumed by HW on the way so we shouldn't count its
1053 * bytes as being sent.
1054 */
1055 len = skb->len - MLXSW_TXHDR_LEN;
1056
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001057 /* Due to a race we might fail here because of a full queue. In that
1058 * unlikely case we simply drop the packet.
1059 */
Jiri Pirko307c2432016-04-08 19:11:22 +02001060 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001061
1062 if (!err) {
1063 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1064 u64_stats_update_begin(&pcpu_stats->syncp);
1065 pcpu_stats->tx_packets++;
1066 pcpu_stats->tx_bytes += len;
1067 u64_stats_update_end(&pcpu_stats->syncp);
1068 } else {
1069 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1070 dev_kfree_skb_any(skb);
1071 }
1072 return NETDEV_TX_OK;
1073}
1074
Jiri Pirkoc5b9b512015-12-03 12:12:22 +01001075static void mlxsw_sp_set_rx_mode(struct net_device *dev)
1076{
1077}
1078
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001079static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
1080{
1081 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1082 struct sockaddr *addr = p;
1083 int err;
1084
1085 if (!is_valid_ether_addr(addr->sa_data))
1086 return -EADDRNOTAVAIL;
1087
1088 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
1089 if (err)
1090 return err;
1091 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1092 return 0;
1093}
1094
Ido Schimmel18281f22017-03-24 08:02:51 +01001095static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
1096 int mtu)
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001097{
Ido Schimmel18281f22017-03-24 08:02:51 +01001098 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
Ido Schimmelf417f042017-03-24 08:02:50 +01001099}
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001100
Ido Schimmelf417f042017-03-24 08:02:50 +01001101#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
Ido Schimmel18281f22017-03-24 08:02:51 +01001102
1103static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1104 u16 delay)
Ido Schimmelf417f042017-03-24 08:02:50 +01001105{
Ido Schimmel18281f22017-03-24 08:02:51 +01001106 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
1107 BITS_PER_BYTE));
1108 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
1109 mtu);
Ido Schimmelf417f042017-03-24 08:02:50 +01001110}
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001111
Ido Schimmel18281f22017-03-24 08:02:51 +01001112/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
Ido Schimmelf417f042017-03-24 08:02:50 +01001113 * Assumes 100m cable and maximum MTU.
1114 */
Ido Schimmel18281f22017-03-24 08:02:51 +01001115#define MLXSW_SP_PAUSE_DELAY 58752
1116
1117static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1118 u16 delay, bool pfc, bool pause)
Ido Schimmelf417f042017-03-24 08:02:50 +01001119{
1120 if (pfc)
Ido Schimmel18281f22017-03-24 08:02:51 +01001121 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
Ido Schimmelf417f042017-03-24 08:02:50 +01001122 else if (pause)
Ido Schimmel18281f22017-03-24 08:02:51 +01001123 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001124 else
Ido Schimmelf417f042017-03-24 08:02:50 +01001125 return 0;
1126}
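
/* Worked example for the PFC branch, assuming a requested delay of 32768
 * bits and an MTU of 1518: the delay is rounded up to 4096 bytes and
 * converted to cells, then doubled (MLXSW_SP_CELL_FACTOR) and increased
 * by the MTU in cells. For plain PAUSE the fixed MLXSW_SP_PAUSE_DELAY of
 * 58752 bytes is converted to cells instead, and lossy buffers need no
 * delay at all.
 */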
1127
1128static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
1129 bool lossy)
1130{
1131 if (lossy)
1132 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
1133 else
1134 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
1135 thres);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001136}
1137
1138int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001139 u8 *prio_tc, bool pause_en,
1140 struct ieee_pfc *my_pfc)
Ido Schimmelff6551e2016-04-06 17:10:03 +02001141{
1142 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001143 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
1144 u16 delay = !!my_pfc ? my_pfc->delay : 0;
Ido Schimmelff6551e2016-04-06 17:10:03 +02001145 char pbmc_pl[MLXSW_REG_PBMC_LEN];
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001146 int i, j, err;
Ido Schimmelff6551e2016-04-06 17:10:03 +02001147
1148 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
1149 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1150 if (err)
1151 return err;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001152
1153 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1154 bool configure = false;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001155 bool pfc = false;
Ido Schimmelf417f042017-03-24 08:02:50 +01001156 bool lossy;
1157 u16 thres;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001158
1159 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
1160 if (prio_tc[j] == i) {
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001161 pfc = pfc_en & BIT(j);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001162 configure = true;
1163 break;
1164 }
1165 }
1166
1167 if (!configure)
1168 continue;
Ido Schimmelf417f042017-03-24 08:02:50 +01001169
1170 lossy = !(pfc || pause_en);
Ido Schimmel18281f22017-03-24 08:02:51 +01001171 thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
1172 delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
1173 pause_en);
Ido Schimmelf417f042017-03-24 08:02:50 +01001174 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001175 }
1176
Ido Schimmelff6551e2016-04-06 17:10:03 +02001177 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1178}
1179
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001180static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001181 int mtu, bool pause_en)
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001182{
1183 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
1184 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001185 struct ieee_pfc *my_pfc;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001186 u8 *prio_tc;
1187
1188 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001189 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001190
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001191 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001192 pause_en, my_pfc);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02001193}
1194
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001195static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
1196{
1197 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001198 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001199 int err;
1200
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001201 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001202 if (err)
1203 return err;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001204 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
1205 if (err)
1206 goto err_span_port_mtu_update;
Ido Schimmelff6551e2016-04-06 17:10:03 +02001207 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
1208 if (err)
1209 goto err_port_mtu_set;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001210 dev->mtu = mtu;
1211 return 0;
Ido Schimmelff6551e2016-04-06 17:10:03 +02001212
1213err_port_mtu_set:
Yotam Gigi763b4b72016-07-21 12:03:17 +02001214 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
1215err_span_port_mtu_update:
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001216 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
Ido Schimmelff6551e2016-04-06 17:10:03 +02001217 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001218}
1219
Or Gerlitz4bdcc6c2016-09-20 08:14:08 +03001220static int
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001221mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
1222 struct rtnl_link_stats64 *stats)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001223{
1224 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1225 struct mlxsw_sp_port_pcpu_stats *p;
1226 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1227 u32 tx_dropped = 0;
1228 unsigned int start;
1229 int i;
1230
1231 for_each_possible_cpu(i) {
1232 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
1233 do {
1234 start = u64_stats_fetch_begin_irq(&p->syncp);
1235 rx_packets = p->rx_packets;
1236 rx_bytes = p->rx_bytes;
1237 tx_packets = p->tx_packets;
1238 tx_bytes = p->tx_bytes;
1239 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1240
1241 stats->rx_packets += rx_packets;
1242 stats->rx_bytes += rx_bytes;
1243 stats->tx_packets += tx_packets;
1244 stats->tx_bytes += tx_bytes;
1245 /* tx_dropped is u32, updated without syncp protection. */
1246 tx_dropped += p->tx_dropped;
1247 }
1248 stats->tx_dropped = tx_dropped;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001249 return 0;
1250}
1251
Or Gerlitz3df5b3c2016-11-22 23:09:54 +02001252static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001253{
1254 switch (attr_id) {
1255 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1256 return true;
1257 }
1258
1259 return false;
1260}
1261
Or Gerlitz4bdcc6c2016-09-20 08:14:08 +03001262static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1263 void *sp)
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001264{
1265 switch (attr_id) {
1266 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1267 return mlxsw_sp_port_get_sw_stats64(dev, sp);
1268 }
1269
1270 return -EINVAL;
1271}
1272
1273static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
1274 int prio, char *ppcnt_pl)
1275{
1276 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1277 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1278
1279 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1280 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1281}
1282
1283static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1284 struct rtnl_link_stats64 *stats)
1285{
1286 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1287 int err;
1288
1289 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1290 0, ppcnt_pl);
1291 if (err)
1292 goto out;
1293
1294 stats->tx_packets =
1295 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1296 stats->rx_packets =
1297 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1298 stats->tx_bytes =
1299 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1300 stats->rx_bytes =
1301 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1302 stats->multicast =
1303 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1304
1305 stats->rx_crc_errors =
1306 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1307 stats->rx_frame_errors =
1308 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1309
1310 stats->rx_length_errors = (
1311 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1312 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1313 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1314
1315 stats->rx_errors = (stats->rx_crc_errors +
1316 stats->rx_frame_errors + stats->rx_length_errors);
1317
1318out:
1319 return err;
1320}
1321
1322static void update_stats_cache(struct work_struct *work)
1323{
1324 struct mlxsw_sp_port *mlxsw_sp_port =
1325 container_of(work, struct mlxsw_sp_port,
1326 hw_stats.update_dw.work);
1327
1328 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1329 goto out;
1330
1331 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1332 mlxsw_sp_port->hw_stats.cache);
1333
1334out:
1335 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1336 MLXSW_HW_STATS_UPDATE_TIME);
1337}
1338
1339/* Return the stats from a cache that is updated periodically,
1340 * as this function might get called in an atomic context.
1341 */
stephen hemmingerbc1f4472017-01-06 19:12:52 -08001342static void
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001343mlxsw_sp_port_get_stats64(struct net_device *dev,
1344 struct rtnl_link_stats64 *stats)
1345{
1346 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1347
1348 memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001349}
1350
Jiri Pirko93cd0812017-04-18 16:55:35 +02001351static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1352 u16 vid_begin, u16 vid_end,
1353 bool is_member, bool untagged)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001354{
1355 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1356 char *spvm_pl;
1357 int err;
1358
1359 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1360 if (!spvm_pl)
1361 return -ENOMEM;
1362
1363 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1364 vid_end, is_member, untagged);
1365 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1366 kfree(spvm_pl);
1367 return err;
1368}
1369
Jiri Pirko93cd0812017-04-18 16:55:35 +02001370int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1371 u16 vid_end, bool is_member, bool untagged)
1372{
1373 u16 vid, vid_e;
1374 int err;
1375
1376 for (vid = vid_begin; vid <= vid_end;
1377 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1378 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1379 vid_end);
1380
1381 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1382 is_member, untagged);
1383 if (err)
1384 return err;
1385 }
1386
1387 return 0;
1388}
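
/* Example: mlxsw_sp_port_vlan_set(port, 1, 4094, true, false) walks the
 * VID range in chunks of MLXSW_REG_SPVM_REC_MAX_COUNT records, issuing
 * one SPVM write per chunk, so large ranges are programmed without
 * exceeding the register's record limit.
 */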
1389
Ido Schimmelc57529e2017-05-26 08:37:31 +02001390static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001391{
Ido Schimmelc57529e2017-05-26 08:37:31 +02001392 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001393
Ido Schimmelc57529e2017-05-26 08:37:31 +02001394 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1395 &mlxsw_sp_port->vlans_list, list)
1396 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001397}
1398
Ido Schimmel31a08a52017-05-26 08:37:26 +02001399static struct mlxsw_sp_port_vlan *
1400mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1401{
1402 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmelc57529e2017-05-26 08:37:31 +02001403 bool untagged = vid == 1;
1404 int err;
1405
1406 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1407 if (err)
1408 return ERR_PTR(err);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001409
1410 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001411 if (!mlxsw_sp_port_vlan) {
1412 err = -ENOMEM;
1413 goto err_port_vlan_alloc;
1414 }
Ido Schimmel31a08a52017-05-26 08:37:26 +02001415
1416 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1417 mlxsw_sp_port_vlan->vid = vid;
1418 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1419
1420 return mlxsw_sp_port_vlan;
Ido Schimmelc57529e2017-05-26 08:37:31 +02001421
1422err_port_vlan_alloc:
1423 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1424 return ERR_PTR(err);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001425}
1426
1427static void
1428mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1429{
Ido Schimmelc57529e2017-05-26 08:37:31 +02001430 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1431 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02001432
Ido Schimmel31a08a52017-05-26 08:37:26 +02001433 list_del(&mlxsw_sp_port_vlan->list);
1434 kfree(mlxsw_sp_port_vlan);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001435 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1436}
1437
1438struct mlxsw_sp_port_vlan *
1439mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1440{
1441 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1442
1443 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1444 if (mlxsw_sp_port_vlan)
1445 return mlxsw_sp_port_vlan;
1446
1447 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1448}
1449
1450void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1451{
Ido Schimmela1107482017-05-26 08:37:39 +02001452 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1453
Ido Schimmelc57529e2017-05-26 08:37:31 +02001454 if (mlxsw_sp_port_vlan->bridge_port)
1455 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
Ido Schimmela1107482017-05-26 08:37:39 +02001456 else if (fid)
1457 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001458
1459 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001460}
1461
Ido Schimmel05978482016-08-17 16:39:30 +02001462static int mlxsw_sp_port_add_vid(struct net_device *dev,
1463 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001464{
1465 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001466
1467 /* VLAN 0 is added to HW filter when device goes up, but it is
1468 * reserved in our case, so simply return.
1469 */
1470 if (!vid)
1471 return 0;
1472
Ido Schimmelc57529e2017-05-26 08:37:31 +02001473 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001474}
1475
Ido Schimmel32d863f2016-07-02 11:00:10 +02001476static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1477 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001478{
1479 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001480 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001481
1482 /* VLAN 0 is removed from HW filter when device goes down, but
1483 * it is reserved in our case, so simply return.
1484 */
1485 if (!vid)
1486 return 0;
1487
Ido Schimmel31a08a52017-05-26 08:37:26 +02001488 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001489 if (!mlxsw_sp_port_vlan)
Ido Schimmel31a08a52017-05-26 08:37:26 +02001490 return 0;
Ido Schimmelc57529e2017-05-26 08:37:31 +02001491 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001492
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001493 return 0;
1494}
1495
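/* Front-panel naming: "p<module>" for a regular port and "p<module>s<subport>"
 * for a split port, where <subport> is derived from the lane/width mapping.
 */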
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001496static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1497 size_t len)
1498{
1499 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmeld664b412016-06-09 09:51:40 +02001500 u8 module = mlxsw_sp_port->mapping.module;
1501 u8 width = mlxsw_sp_port->mapping.width;
1502 u8 lane = mlxsw_sp_port->mapping.lane;
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001503 int err;
1504
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001505 if (!mlxsw_sp_port->split)
1506 err = snprintf(name, len, "p%d", module + 1);
1507 else
1508 err = snprintf(name, len, "p%ds%d", module + 1,
1509 lane / width);
1510
1511 if (err >= len)
1512 return -EINVAL;
1513
1514 return 0;
1515}
1516
Yotam Gigi763b4b72016-07-21 12:03:17 +02001517static struct mlxsw_sp_port_mall_tc_entry *
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001518mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1519				 unsigned long cookie)
{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001520 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1521
1522 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1523 if (mall_tc_entry->cookie == cookie)
1524 return mall_tc_entry;
1525
1526 return NULL;
1527}
1528
1529static int
1530mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001531 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001532 const struct tc_action *a,
1533 bool ingress)
1534{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001535 struct net *net = dev_net(mlxsw_sp_port->dev);
1536 enum mlxsw_sp_span_type span_type;
1537 struct mlxsw_sp_port *to_port;
1538 struct net_device *to_dev;
1539 int ifindex;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001540
1541 ifindex = tcf_mirred_ifindex(a);
1542 to_dev = __dev_get_by_index(net, ifindex);
1543 if (!to_dev) {
1544 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1545 return -EINVAL;
1546 }
1547
1548 if (!mlxsw_sp_port_dev_check(to_dev)) {
1549		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
Yotam Gigie915ac62017-01-09 11:25:48 +01001550 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001551 }
1552 to_port = netdev_priv(to_dev);
1553
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001554 mirror->to_local_port = to_port->local_port;
1555 mirror->ingress = ingress;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001556 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001557 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1558}
Yotam Gigi763b4b72016-07-21 12:03:17 +02001559
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001560static void
1561mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1562 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1563{
1564 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1565 enum mlxsw_sp_span_type span_type;
1566 struct mlxsw_sp_port *to_port;
1567
1568 to_port = mlxsw_sp->ports[mirror->to_local_port];
1569 span_type = mirror->ingress ?
1570 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1571 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001572}
1573
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001574static int
1575mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1576 struct tc_cls_matchall_offload *cls,
1577 const struct tc_action *a,
1578 bool ingress)
1579{
1580 int err;
1581
1582 if (!mlxsw_sp_port->sample)
1583 return -EOPNOTSUPP;
1584 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1585 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1586 return -EEXIST;
1587 }
1588 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1589 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1590 return -EOPNOTSUPP;
1591 }
1592
1593 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1594 tcf_sample_psample_group(a));
1595 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1596 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1597 mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1598
1599 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1600 if (err)
1601 goto err_port_sample_set;
1602 return 0;
1603
1604err_port_sample_set:
1605 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1606 return err;
1607}
1608
1609static void
1610mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1611{
1612 if (!mlxsw_sp_port->sample)
1613 return;
1614
1615 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1616 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1617}
1618
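/* Offload a matchall classifier. Exactly one action is accepted: either an
 * egress-mirred mirror or a sample, and only for the ETH_P_ALL protocol.
 */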
Yotam Gigi763b4b72016-07-21 12:03:17 +02001619static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001620 struct tc_cls_matchall_offload *f,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001621 bool ingress)
1622{
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001623 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001624 __be16 protocol = f->common.protocol;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001625 const struct tc_action *a;
WANG Cong22dc13c2016-08-13 22:35:00 -07001626 LIST_HEAD(actions);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001627 int err;
1628
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001629 if (!tcf_exts_has_one_action(f->exts)) {
Yotam Gigi763b4b72016-07-21 12:03:17 +02001630 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
Yotam Gigie915ac62017-01-09 11:25:48 +01001631 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001632 }
1633
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001634 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1635 if (!mall_tc_entry)
1636 return -ENOMEM;
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001637 mall_tc_entry->cookie = f->cookie;
Ido Schimmel86cb13e2016-07-25 13:12:33 +03001638
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001639 tcf_exts_to_list(f->exts, &actions);
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001640 a = list_first_entry(&actions, struct tc_action, list);
1641
1642 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1643 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1644
1645 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1646 mirror = &mall_tc_entry->mirror;
1647 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1648 mirror, a, ingress);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001649 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1650 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001651 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001652 a, ingress);
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001653 } else {
1654 err = -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001655 }
1656
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001657 if (err)
1658 goto err_add_action;
1659
1660 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001661 return 0;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001662
1663err_add_action:
1664 kfree(mall_tc_entry);
1665 return err;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001666}
1667
1668static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001669 struct tc_cls_matchall_offload *f)
Yotam Gigi763b4b72016-07-21 12:03:17 +02001670{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001671 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001672
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001673 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
Jiri Pirko9cbf14e2017-08-07 10:15:25 +02001674 f->cookie);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001675 if (!mall_tc_entry) {
1676 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1677 return;
1678 }
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001679 list_del(&mall_tc_entry->list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001680
1681 switch (mall_tc_entry->type) {
1682 case MLXSW_SP_PORT_MALL_MIRROR:
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001683 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1684 &mall_tc_entry->mirror);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001685 break;
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001686 case MLXSW_SP_PORT_MALL_SAMPLE:
1687 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1688 break;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001689 default:
1690 WARN_ON(1);
1691 }
1692
Yotam Gigi763b4b72016-07-21 12:03:17 +02001693 kfree(mall_tc_entry);
1694}
1695
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001696static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001697 struct tc_cls_matchall_offload *f)
Yotam Gigi763b4b72016-07-21 12:03:17 +02001698{
Jiri Pirkoa2e8da92017-08-09 14:30:33 +02001699 bool ingress;
1700
1701 if (is_classid_clsact_ingress(f->common.classid))
1702 ingress = true;
1703 else if (is_classid_clsact_egress(f->common.classid))
1704 ingress = false;
1705 else
1706 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001707
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001708 if (f->common.chain_index)
Jiri Pirkoa5fcf8a2017-06-06 17:00:16 +02001709 return -EOPNOTSUPP;
1710
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001711 switch (f->command) {
1712 case TC_CLSMATCHALL_REPLACE:
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001713 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001714 ingress);
1715 case TC_CLSMATCHALL_DESTROY:
1716 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1717 return 0;
1718 default:
1719 return -EOPNOTSUPP;
1720 }
1721}
1722
1723static int
1724mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001725 struct tc_cls_flower_offload *f)
1726{
Jiri Pirkoa2e8da92017-08-09 14:30:33 +02001727 bool ingress;
1728
1729 if (is_classid_clsact_ingress(f->common.classid))
1730 ingress = true;
1731 else if (is_classid_clsact_egress(f->common.classid))
1732 ingress = false;
1733 else
1734 return -EOPNOTSUPP;
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001735
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001736 if (f->common.chain_index)
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001737 return -EOPNOTSUPP;
1738
1739 switch (f->command) {
1740 case TC_CLSFLOWER_REPLACE:
Jiri Pirko5fd9fc42017-08-07 10:15:29 +02001741 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001742 case TC_CLSFLOWER_DESTROY:
1743 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
1744 return 0;
1745 case TC_CLSFLOWER_STATS:
1746 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
1747 default:
1748 return -EOPNOTSUPP;
1749 }
1750}
1751
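/* ndo_setup_tc entry point: dispatch matchall and flower classifier offloads
 * to their respective handlers.
 */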
1752static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02001753 void *type_data)
Jiri Pirkofd33f1d2017-08-07 10:15:24 +02001754{
1755 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1756
Jiri Pirko2572ac52017-08-07 10:15:17 +02001757 switch (type) {
Jiri Pirkoade9b652017-08-07 10:15:18 +02001758 case TC_SETUP_CLSMATCHALL:
Jiri Pirkode4784c2017-08-07 10:15:32 +02001759 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
Jiri Pirko7aa0f5a2017-02-03 10:29:09 +01001760 case TC_SETUP_CLSFLOWER:
Jiri Pirkode4784c2017-08-07 10:15:32 +02001761 return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
Jiri Pirko2572ac52017-08-07 10:15:17 +02001762 default:
1763 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001764 }
Yotam Gigi763b4b72016-07-21 12:03:17 +02001765}
1766
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001767static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1768 .ndo_open = mlxsw_sp_port_open,
1769 .ndo_stop = mlxsw_sp_port_stop,
1770 .ndo_start_xmit = mlxsw_sp_port_xmit,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001771 .ndo_setup_tc = mlxsw_sp_setup_tc,
Jiri Pirkoc5b9b512015-12-03 12:12:22 +01001772 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001773 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1774 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1775 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001776 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1777 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001778 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1779 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001780 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001781};
1782
1783static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1784 struct ethtool_drvinfo *drvinfo)
1785{
1786 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1787 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1788
1789 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1790 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1791 sizeof(drvinfo->version));
1792 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1793 "%d.%d.%d",
1794 mlxsw_sp->bus_info->fw_rev.major,
1795 mlxsw_sp->bus_info->fw_rev.minor,
1796 mlxsw_sp->bus_info->fw_rev.subminor);
1797 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1798 sizeof(drvinfo->bus_info));
1799}
1800
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001801static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1802 struct ethtool_pauseparam *pause)
1803{
1804 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1805
1806 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1807 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1808}
1809
1810static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1811 struct ethtool_pauseparam *pause)
1812{
1813 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1814
1815 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1816 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1817 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1818
1819 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1820 pfcc_pl);
1821}
1822
1823static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1824 struct ethtool_pauseparam *pause)
1825{
1826 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1827 bool pause_en = pause->tx_pause || pause->rx_pause;
1828 int err;
1829
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001830 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1831 netdev_err(dev, "PFC already enabled on port\n");
1832 return -EINVAL;
1833 }
1834
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001835 if (pause->autoneg) {
1836 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1837 return -EINVAL;
1838 }
1839
1840 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1841 if (err) {
1842 netdev_err(dev, "Failed to configure port's headroom\n");
1843 return err;
1844 }
1845
1846 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1847 if (err) {
1848 netdev_err(dev, "Failed to set PAUSE parameters\n");
1849 goto err_port_pause_configure;
1850 }
1851
1852 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1853 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1854
1855 return 0;
1856
1857err_port_pause_configure:
1858 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1859 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1860 return err;
1861}
1862
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001863struct mlxsw_sp_port_hw_stats {
1864 char str[ETH_GSTRING_LEN];
Jiri Pirko412791d2016-10-21 16:07:19 +02001865 u64 (*getter)(const char *payload);
Ido Schimmel18281f22017-03-24 08:02:51 +01001866 bool cells_bytes;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001867};
1868
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001869static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001870 {
1871 .str = "a_frames_transmitted_ok",
1872 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1873 },
1874 {
1875 .str = "a_frames_received_ok",
1876 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1877 },
1878 {
1879 .str = "a_frame_check_sequence_errors",
1880 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1881 },
1882 {
1883 .str = "a_alignment_errors",
1884 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1885 },
1886 {
1887 .str = "a_octets_transmitted_ok",
1888 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1889 },
1890 {
1891 .str = "a_octets_received_ok",
1892 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1893 },
1894 {
1895 .str = "a_multicast_frames_xmitted_ok",
1896 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1897 },
1898 {
1899 .str = "a_broadcast_frames_xmitted_ok",
1900 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1901 },
1902 {
1903 .str = "a_multicast_frames_received_ok",
1904 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1905 },
1906 {
1907 .str = "a_broadcast_frames_received_ok",
1908 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1909 },
1910 {
1911 .str = "a_in_range_length_errors",
1912 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1913 },
1914 {
1915 .str = "a_out_of_range_length_field",
1916 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1917 },
1918 {
1919 .str = "a_frame_too_long_errors",
1920 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1921 },
1922 {
1923 .str = "a_symbol_error_during_carrier",
1924 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1925 },
1926 {
1927 .str = "a_mac_control_frames_transmitted",
1928 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1929 },
1930 {
1931 .str = "a_mac_control_frames_received",
1932 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1933 },
1934 {
1935 .str = "a_unsupported_opcodes_received",
1936 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1937 },
1938 {
1939 .str = "a_pause_mac_ctrl_frames_received",
1940 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1941 },
1942 {
1943 .str = "a_pause_mac_ctrl_frames_xmitted",
1944 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1945 },
1946};
1947
1948#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1949
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001950static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1951 {
1952 .str = "rx_octets_prio",
1953 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1954 },
1955 {
1956 .str = "rx_frames_prio",
1957 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1958 },
1959 {
1960 .str = "tx_octets_prio",
1961 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1962 },
1963 {
1964 .str = "tx_frames_prio",
1965 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1966 },
1967 {
1968 .str = "rx_pause_prio",
1969 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1970 },
1971 {
1972 .str = "rx_pause_duration_prio",
1973 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1974 },
1975 {
1976 .str = "tx_pause_prio",
1977 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1978 },
1979 {
1980 .str = "tx_pause_duration_prio",
1981 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1982 },
1983};
1984
1985#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1986
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001987static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1988 {
1989 .str = "tc_transmit_queue_tc",
Ido Schimmel18281f22017-03-24 08:02:51 +01001990 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
1991 .cells_bytes = true,
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001992 },
1993 {
1994 .str = "tc_no_buffer_discard_uc_tc",
1995 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1996 },
1997};
1998
1999#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2000
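/* Total number of ethtool stats: the IEEE 802.3 group plus one per-priority
 * and one per-TC group for each of the IEEE_8021QAZ_MAX_TCS classes.
 */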
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002001#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002002 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
2003 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002004 IEEE_8021QAZ_MAX_TCS)
2005
2006static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2007{
2008 int i;
2009
2010 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2011 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2012 mlxsw_sp_port_hw_prio_stats[i].str, prio);
2013 *p += ETH_GSTRING_LEN;
2014 }
2015}
2016
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002017static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2018{
2019 int i;
2020
2021 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2022 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2023 mlxsw_sp_port_hw_tc_stats[i].str, tc);
2024 *p += ETH_GSTRING_LEN;
2025 }
2026}
2027
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002028static void mlxsw_sp_port_get_strings(struct net_device *dev,
2029 u32 stringset, u8 *data)
2030{
2031 u8 *p = data;
2032 int i;
2033
2034 switch (stringset) {
2035 case ETH_SS_STATS:
2036 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2037 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2038 ETH_GSTRING_LEN);
2039 p += ETH_GSTRING_LEN;
2040 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002041
2042 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2043 mlxsw_sp_port_get_prio_strings(&p, i);
2044
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002045 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2046 mlxsw_sp_port_get_tc_strings(&p, i);
2047
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002048 break;
2049 }
2050}
2051
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002052static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2053 enum ethtool_phys_id_state state)
2054{
2055 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2056 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2057 char mlcr_pl[MLXSW_REG_MLCR_LEN];
2058 bool active;
2059
2060 switch (state) {
2061 case ETHTOOL_ID_ACTIVE:
2062 active = true;
2063 break;
2064 case ETHTOOL_ID_INACTIVE:
2065 active = false;
2066 break;
2067 default:
2068 return -EOPNOTSUPP;
2069 }
2070
2071 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2072 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2073}
2074
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002075static int
2076mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2077 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2078{
2079 switch (grp) {
2080 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2081 *p_hw_stats = mlxsw_sp_port_hw_stats;
2082 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2083 break;
2084 case MLXSW_REG_PPCNT_PRIO_CNT:
2085 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2086 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2087 break;
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002088 case MLXSW_REG_PPCNT_TC_CNT:
2089 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2090 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2091 break;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002092 default:
2093 WARN_ON(1);
Yotam Gigie915ac62017-01-09 11:25:48 +01002094 return -EOPNOTSUPP;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002095 }
2096 return 0;
2097}
2098
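/* Query a single PPCNT counter group and copy it into the ethtool data array,
 * converting cell-based counters to bytes where needed.
 */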
2099static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2100 enum mlxsw_reg_ppcnt_grp grp, int prio,
2101 u64 *data, int data_index)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002102{
Ido Schimmel18281f22017-03-24 08:02:51 +01002103 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2104 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002105 struct mlxsw_sp_port_hw_stats *hw_stats;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002106 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002107 int i, len;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002108 int err;
2109
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002110 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2111 if (err)
2112 return;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002113 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
Ido Schimmel18281f22017-03-24 08:02:51 +01002114 for (i = 0; i < len; i++) {
Colin Ian Kingfaac0ff2016-09-23 12:02:45 +01002115 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
Ido Schimmel18281f22017-03-24 08:02:51 +01002116 if (!hw_stats[i].cells_bytes)
2117 continue;
2118 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2119 data[data_index + i]);
2120 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002121}
2122
2123static void mlxsw_sp_port_get_stats(struct net_device *dev,
2124 struct ethtool_stats *stats, u64 *data)
2125{
2126 int i, data_index = 0;
2127
2128 /* IEEE 802.3 Counters */
2129 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2130 data, data_index);
2131 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2132
2133 /* Per-Priority Counters */
2134 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2135 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2136 data, data_index);
2137 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2138 }
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002139
2140 /* Per-TC Counters */
2141 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2142 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2143 data, data_index);
2144 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2145 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002146}
2147
2148static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2149{
2150 switch (sset) {
2151 case ETH_SS_STATS:
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002152 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002153 default:
2154 return -EOPNOTSUPP;
2155 }
2156}
2157
2158struct mlxsw_sp_port_link_mode {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002159 enum ethtool_link_mode_bit_indices mask_ethtool;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002160 u32 mask;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002161 u32 speed;
2162};
2163
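/* Map between PTYS register speed bits, ethtool link-mode bits and the
 * corresponding speed in Mb/s.
 */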
2164static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2165 {
2166 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002167 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2168 .speed = SPEED_100,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002169 },
2170 {
2171 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2172 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002173 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2174 .speed = SPEED_1000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002175 },
2176 {
2177 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002178 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2179 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002180 },
2181 {
2182 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2183 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002184 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2185 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002186 },
2187 {
2188 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2189 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2190 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2191 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002192 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2193 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002194 },
2195 {
2196 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002197 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2198 .speed = SPEED_20000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002199 },
2200 {
2201 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002202 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2203 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002204 },
2205 {
2206 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002207 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2208 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002209 },
2210 {
2211 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002212 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2213 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002214 },
2215 {
2216 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002217 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2218 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002219 },
2220 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002221 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2222 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2223 .speed = SPEED_25000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002224 },
2225 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002226 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2227 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2228 .speed = SPEED_25000,
2229 },
2230 {
2231 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2232 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2233 .speed = SPEED_25000,
2234 },
2240 {
2241 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2242 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2243 .speed = SPEED_50000,
2244 },
2245 {
2246 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2247 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2248 .speed = SPEED_50000,
2249 },
2250 {
2251 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2252 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2253 .speed = SPEED_50000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002254 },
2255 {
2256 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002257 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2258 .speed = SPEED_56000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002259 },
2260 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002261 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2262 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2263 .speed = SPEED_56000,
2264 },
2265 {
2266 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2267 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2268 .speed = SPEED_56000,
2269 },
2270 {
2271 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2272 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2273 .speed = SPEED_56000,
2274 },
2275 {
2276 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2277 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2278 .speed = SPEED_100000,
2279 },
2280 {
2281 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2282 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2283 .speed = SPEED_100000,
2284 },
2285 {
2286 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2287 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2288 .speed = SPEED_100000,
2289 },
2290 {
2291 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2292 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2293 .speed = SPEED_100000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002294 },
2295};
2296
2297#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2298
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002299static void
2300mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2301 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002302{
2303 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2304 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2305 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2306 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2307 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2308 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002309 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002310
2311 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2312 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2313 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2314 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2315 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002316 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002317}
2318
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002319static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002320{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002321 int i;
2322
2323 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2324 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002325 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2326 mode);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002327 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002328}
2329
2330static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002331 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002332{
2333 u32 speed = SPEED_UNKNOWN;
2334 u8 duplex = DUPLEX_UNKNOWN;
2335 int i;
2336
2337 if (!carrier_ok)
2338 goto out;
2339
2340 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2341 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2342 speed = mlxsw_sp_port_link_mode[i].speed;
2343 duplex = DUPLEX_FULL;
2344 break;
2345 }
2346 }
2347out:
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002348 cmd->base.speed = speed;
2349 cmd->base.duplex = duplex;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002350}
2351
2352static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2353{
2354 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2355 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2356 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2357 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2358 return PORT_FIBRE;
2359
2360 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2361 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2362 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2363 return PORT_DA;
2364
2365 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2366 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2367 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2368 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2369 return PORT_NONE;
2370
2371 return PORT_OTHER;
2372}
2373
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002374static u32
2375mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002376{
2377 u32 ptys_proto = 0;
2378 int i;
2379
2380 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002381 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2382 cmd->link_modes.advertising))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002383 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2384 }
2385 return ptys_proto;
2386}
2387
2388static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2389{
2390 u32 ptys_proto = 0;
2391 int i;
2392
2393 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2394 if (speed == mlxsw_sp_port_link_mode[i].speed)
2395 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2396 }
2397 return ptys_proto;
2398}
2399
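/* Collect the PTYS mask of every link mode whose speed does not exceed
 * upper_speed.
 */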
Ido Schimmel18f1e702016-02-26 17:32:31 +01002400static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2401{
2402 u32 ptys_proto = 0;
2403 int i;
2404
2405 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2406 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2407 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2408 }
2409 return ptys_proto;
2410}
2411
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002412static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2413 struct ethtool_link_ksettings *cmd)
2414{
2415 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2416 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2417 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2418
2419 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2420 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2421}
2422
2423static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2424 struct ethtool_link_ksettings *cmd)
2425{
2426 if (!autoneg)
2427 return;
2428
2429 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2430 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2431}
2432
2433static void
2434mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2435 struct ethtool_link_ksettings *cmd)
2436{
2437 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2438 return;
2439
2440 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2441 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2442}
2443
2444static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2445 struct ethtool_link_ksettings *cmd)
2446{
2447 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2448 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2449 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2450 char ptys_pl[MLXSW_REG_PTYS_LEN];
2451 u8 autoneg_status;
2452 bool autoneg;
2453 int err;
2454
2455 autoneg = mlxsw_sp_port->link.autoneg;
Elad Raz401c8b42016-10-28 21:35:52 +02002456 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002457 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2458 if (err)
2459 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002460 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2461 &eth_proto_oper);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002462
2463 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2464
2465 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2466
2467 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2468 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2469 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2470
2471 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2472 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2473 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2474 cmd);
2475
2476 return 0;
2477}
2478
2479static int
2480mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2481 const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002482{
2483 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2484 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2485 char ptys_pl[MLXSW_REG_PTYS_LEN];
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002486 u32 eth_proto_cap, eth_proto_new;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002487 bool autoneg;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002488 int err;
2489
Elad Raz401c8b42016-10-28 21:35:52 +02002490 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002491 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002492 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002493 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002494 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002495
2496 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2497 eth_proto_new = autoneg ?
2498 mlxsw_sp_to_ptys_advert_link(cmd) :
2499 mlxsw_sp_to_ptys_speed(cmd->base.speed);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002500
2501 eth_proto_new = eth_proto_new & eth_proto_cap;
2502 if (!eth_proto_new) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002503 netdev_err(dev, "No supported speed requested\n");
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002504 return -EINVAL;
2505 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002506
Elad Raz401c8b42016-10-28 21:35:52 +02002507 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2508 eth_proto_new);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002509 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002510 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002511 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002512
Ido Schimmel6277d462016-07-15 11:14:58 +02002513 if (!netif_running(dev))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002514 return 0;
2515
Ido Schimmel0c83f882016-09-12 13:26:23 +02002516 mlxsw_sp_port->link.autoneg = autoneg;
2517
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002518 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2519 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002520
2521 return 0;
2522}
2523
Yotam Gigice6ef68f2017-06-01 16:26:46 +03002524static int mlxsw_sp_flash_device(struct net_device *dev,
2525 struct ethtool_flash *flash)
2526{
2527 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2528 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2529 const struct firmware *firmware;
2530 int err;
2531
2532 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2533 return -EOPNOTSUPP;
2534
2535 dev_hold(dev);
2536 rtnl_unlock();
2537
2538 err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2539 if (err)
2540 goto out;
2541 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2542 release_firmware(firmware);
2543out:
2544 rtnl_lock();
2545 dev_put(dev);
2546 return err;
2547}
2548
Arkadi Sharshevsky2ea10902017-06-14 09:27:40 +02002549#define MLXSW_SP_QSFP_I2C_ADDR 0x50
2550
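/* Read a chunk of the module EEPROM via the MCIA register. At most
 * MLXSW_SP_REG_MCIA_EEPROM_SIZE bytes are read per call; the actual length is
 * returned in p_read_size so callers can loop over larger reads.
 */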
2551static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2552 u16 offset, u16 size, void *data,
2553 unsigned int *p_read_size)
2554{
2555 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2556 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
2557 char mcia_pl[MLXSW_REG_MCIA_LEN];
2558 int status;
2559 int err;
2560
2561 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
2562 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
2563 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR);
2564
2565 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
2566 if (err)
2567 return err;
2568
2569 status = mlxsw_reg_mcia_status_get(mcia_pl);
2570 if (status)
2571 return -EIO;
2572
2573 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
2574 memcpy(data, eeprom_tmp, size);
2575 *p_read_size = size;
2576
2577 return 0;
2578}
2579
2580enum mlxsw_sp_eeprom_module_info_rev_id {
2581 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
2582 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
2583 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
2584};
2585
2586enum mlxsw_sp_eeprom_module_info_id {
2587 MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
2588 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
2589 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
2590 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
2591};
2592
2593enum mlxsw_sp_eeprom_module_info {
2594 MLXSW_SP_EEPROM_MODULE_INFO_ID,
2595 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
2596 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2597};
2598
2599static int mlxsw_sp_get_module_info(struct net_device *netdev,
2600 struct ethtool_modinfo *modinfo)
2601{
2602 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2603 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
2604 u8 module_rev_id, module_id;
2605 unsigned int read_size;
2606 int err;
2607
2608 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
2609 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2610 module_info, &read_size);
2611 if (err)
2612 return err;
2613
2614 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
2615 return -EIO;
2616
2617 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
2618 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
2619
2620 switch (module_id) {
2621 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
2622 modinfo->type = ETH_MODULE_SFF_8436;
2623 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2624 break;
2625 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
2626 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
2627 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
2628 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
2629 modinfo->type = ETH_MODULE_SFF_8636;
2630 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2631 } else {
2632 modinfo->type = ETH_MODULE_SFF_8436;
2633 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2634 }
2635 break;
2636 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
2637 modinfo->type = ETH_MODULE_SFF_8472;
2638 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2639 break;
2640 default:
2641 return -EINVAL;
2642 }
2643
2644 return 0;
2645}
2646
2647static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
2648 struct ethtool_eeprom *ee,
2649 u8 *data)
2650{
2651 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2652 int offset = ee->offset;
2653 unsigned int read_size;
2654 int i = 0;
2655 int err;
2656
2657 if (!ee->len)
2658 return -EINVAL;
2659
2660 memset(data, 0, ee->len);
2661
2662 while (i < ee->len) {
2663 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
2664 ee->len - i, data + i,
2665 &read_size);
2666 if (err) {
2667			netdev_err(mlxsw_sp_port->dev, "EEPROM query failed\n");
2668 return err;
2669 }
2670
2671 i += read_size;
2672 offset += read_size;
2673 }
2674
2675 return 0;
2676}
2677
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002678static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2679 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2680 .get_link = ethtool_op_get_link,
Ido Schimmel9f7ec052016-04-06 17:10:14 +02002681 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2682 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002683 .get_strings = mlxsw_sp_port_get_strings,
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002684 .set_phys_id = mlxsw_sp_port_set_phys_id,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002685 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2686 .get_sset_count = mlxsw_sp_port_get_sset_count,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002687 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2688 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
Yotam Gigice6ef68f2017-06-01 16:26:46 +03002689 .flash_device = mlxsw_sp_flash_device,
Arkadi Sharshevsky2ea10902017-06-14 09:27:40 +02002690 .get_module_info = mlxsw_sp_get_module_info,
2691 .get_module_eeprom = mlxsw_sp_get_module_eeprom,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002692};
2693
Ido Schimmel18f1e702016-02-26 17:32:31 +01002694static int
2695mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2696{
2697 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2698 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2699 char ptys_pl[MLXSW_REG_PTYS_LEN];
2700 u32 eth_proto_admin;
2701
2702 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
Elad Raz401c8b42016-10-28 21:35:52 +02002703 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2704 eth_proto_admin);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002705 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2706}
2707
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002708int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2709 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2710 bool dwrr, u8 dwrr_weight)
Ido Schimmel90183b92016-04-06 17:10:08 +02002711{
2712 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2713 char qeec_pl[MLXSW_REG_QEEC_LEN];
2714
2715 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2716 next_index);
2717 mlxsw_reg_qeec_de_set(qeec_pl, true);
2718 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2719 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2720 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2721}
2722
Ido Schimmelcc7cf512016-04-06 17:10:11 +02002723int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2724 enum mlxsw_reg_qeec_hr hr, u8 index,
2725 u8 next_index, u32 maxrate)
Ido Schimmel90183b92016-04-06 17:10:08 +02002726{
2727 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2728 char qeec_pl[MLXSW_REG_QEEC_LEN];
2729
2730 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2731 next_index);
2732 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2733 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2734 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2735}
2736
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002737int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2738 u8 switch_prio, u8 tclass)
Ido Schimmel90183b92016-04-06 17:10:08 +02002739{
2740 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2741 char qtct_pl[MLXSW_REG_QTCT_LEN];
2742
2743 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2744 tclass);
2745 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2746}
2747
2748static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2749{
2750 int err, i;
2751
2752	/* Set up the element hierarchy, so that each TC is linked to
2753	 * one subgroup, and all subgroups are members of the same group.
2754 */
2755 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2756 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2757 0);
2758 if (err)
2759 return err;
2760 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2761 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2762 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2763 0, false, 0);
2764 if (err)
2765 return err;
2766 }
2767 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2768 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2769 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2770 false, 0);
2771 if (err)
2772 return err;
2773 }
2774
2775	/* Make sure the max shaper is disabled in all hierarchies that
2776 * support it.
2777 */
2778 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2779 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2780 MLXSW_REG_QEEC_MAS_DIS);
2781 if (err)
2782 return err;
2783 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2784 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2785 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2786 i, 0,
2787 MLXSW_REG_QEEC_MAS_DIS);
2788 if (err)
2789 return err;
2790 }
2791 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2792 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2793 MLXSW_REG_QEEC_HIERARCY_TC,
2794 i, i,
2795 MLXSW_REG_QEEC_MAS_DIS);
2796 if (err)
2797 return err;
2798 }
2799
2800 /* Map all priorities to traffic class 0. */
2801 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2802 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2803 if (err)
2804 return err;
2805 }
2806
2807 return 0;
2808}
2809
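/* Allocate, initialize and register the netdev backing a single front-panel
 * port (or split sub-port), including SWID, MTU, buffer, ETS, DCB, FID and
 * default-VLAN setup.
 */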
Ido Schimmel5b153852017-06-08 08:47:44 +02002810static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2811 bool split, u8 module, u8 width, u8 lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002812{
Ido Schimmelc57529e2017-05-26 08:37:31 +02002813 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002814 struct mlxsw_sp_port *mlxsw_sp_port;
2815 struct net_device *dev;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002816 int err;
2817
Ido Schimmel5b153852017-06-08 08:47:44 +02002818 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2819 if (err) {
2820 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2821 local_port);
2822 return err;
2823 }
2824
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002825 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
Ido Schimmel5b153852017-06-08 08:47:44 +02002826 if (!dev) {
2827 err = -ENOMEM;
2828 goto err_alloc_etherdev;
2829 }
Jiri Pirkof20a91f2016-10-27 15:13:00 +02002830 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002831 mlxsw_sp_port = netdev_priv(dev);
2832 mlxsw_sp_port->dev = dev;
2833 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2834 mlxsw_sp_port->local_port = local_port;
Ido Schimmelc57529e2017-05-26 08:37:31 +02002835 mlxsw_sp_port->pvid = 1;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002836 mlxsw_sp_port->split = split;
Ido Schimmeld664b412016-06-09 09:51:40 +02002837 mlxsw_sp_port->mapping.module = module;
2838 mlxsw_sp_port->mapping.width = width;
2839 mlxsw_sp_port->mapping.lane = lane;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002840 mlxsw_sp_port->link.autoneg = 1;
Ido Schimmel31a08a52017-05-26 08:37:26 +02002841 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02002842 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002843
2844 mlxsw_sp_port->pcpu_stats =
2845 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2846 if (!mlxsw_sp_port->pcpu_stats) {
2847 err = -ENOMEM;
2848 goto err_alloc_stats;
2849 }
2850
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002851 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2852 GFP_KERNEL);
2853 if (!mlxsw_sp_port->sample) {
2854 err = -ENOMEM;
2855 goto err_alloc_sample;
2856 }
2857
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002858 mlxsw_sp_port->hw_stats.cache =
2859 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2860
2861 if (!mlxsw_sp_port->hw_stats.cache) {
2862 err = -ENOMEM;
2863 goto err_alloc_hw_stats;
2864 }
2865 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2866 &update_stats_cache);
2867
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002868 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2869 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2870
Ido Schimmel2e915e02017-06-08 08:47:45 +02002871 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
Ido Schimmel5b153852017-06-08 08:47:44 +02002872 if (err) {
2873 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2874 mlxsw_sp_port->local_port);
2875 goto err_port_module_map;
2876 }
2877
Ido Schimmel3247ff22016-09-08 08:16:02 +02002878 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2879 if (err) {
2880 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2881 mlxsw_sp_port->local_port);
2882 goto err_port_swid_set;
2883 }
2884
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002885 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2886 if (err) {
2887 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2888 mlxsw_sp_port->local_port);
2889 goto err_dev_addr_init;
2890 }
2891
2892 netif_carrier_off(dev);
2893
2894 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
Yotam Gigi763b4b72016-07-21 12:03:17 +02002895 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2896 dev->hw_features |= NETIF_F_HW_TC;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002897
Jarod Wilsond894be52016-10-20 13:55:16 -04002898 dev->min_mtu = 0;
2899 dev->max_mtu = ETH_MAX_MTU;
2900
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002901	/* Each packet needs to have a Tx header (metadata) on top of all other
2902 * headers.
2903 */
Yotam Gigifeb7d382016-10-04 09:46:04 +02002904 dev->needed_headroom = MLXSW_TXHDR_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002905
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002906 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2907 if (err) {
2908 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2909 mlxsw_sp_port->local_port);
2910 goto err_port_system_port_mapping_set;
2911 }
2912
Ido Schimmel18f1e702016-02-26 17:32:31 +01002913 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2914 if (err) {
2915 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2916 mlxsw_sp_port->local_port);
2917 goto err_port_speed_by_width_set;
2918 }
2919
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002920 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2921 if (err) {
2922 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2923 mlxsw_sp_port->local_port);
2924 goto err_port_mtu_set;
2925 }
2926
2927 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2928 if (err)
2929 goto err_port_admin_status_set;
2930
2931 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2932 if (err) {
2933 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2934 mlxsw_sp_port->local_port);
2935 goto err_port_buffers_init;
2936 }
2937
Ido Schimmel90183b92016-04-06 17:10:08 +02002938 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2939 if (err) {
2940 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2941 mlxsw_sp_port->local_port);
2942 goto err_port_ets_init;
2943 }
2944
Ido Schimmelf00817d2016-04-06 17:10:09 +02002945 /* ETS and buffers must be initialized before DCB. */
2946 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2947 if (err) {
2948 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2949 mlxsw_sp_port->local_port);
2950 goto err_port_dcb_init;
2951 }
2952
Ido Schimmela1107482017-05-26 08:37:39 +02002953 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
Ido Schimmel45a4a162017-05-16 19:38:35 +02002954 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02002955 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
Ido Schimmel45a4a162017-05-16 19:38:35 +02002956 mlxsw_sp_port->local_port);
Ido Schimmela1107482017-05-26 08:37:39 +02002957 goto err_port_fids_init;
Ido Schimmel45a4a162017-05-16 19:38:35 +02002958 }
2959
Ido Schimmelc57529e2017-05-26 08:37:31 +02002960 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2961 if (IS_ERR(mlxsw_sp_port_vlan)) {
2962 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
Ido Schimmel05978482016-08-17 16:39:30 +02002963 mlxsw_sp_port->local_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02002964 goto err_port_vlan_get;
Ido Schimmel05978482016-08-17 16:39:30 +02002965 }
2966
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002967 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
Ido Schimmel2f258442016-08-17 16:39:31 +02002968 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002969 err = register_netdev(dev);
2970 if (err) {
2971 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2972 mlxsw_sp_port->local_port);
2973 goto err_register_netdev;
2974 }
2975
Elad Razd808c7e2016-10-28 21:35:57 +02002976 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2977 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2978 module);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002979 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002980 return 0;
2981
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002982err_register_netdev:
Ido Schimmel2f258442016-08-17 16:39:31 +02002983 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002984 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02002985 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2986err_port_vlan_get:
Ido Schimmela1107482017-05-26 08:37:39 +02002987 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
2988err_port_fids_init:
Ido Schimmel4de34eb2016-08-04 17:36:22 +03002989 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002990err_port_dcb_init:
Ido Schimmel90183b92016-04-06 17:10:08 +02002991err_port_ets_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002992err_port_buffers_init:
2993err_port_admin_status_set:
2994err_port_mtu_set:
Ido Schimmel18f1e702016-02-26 17:32:31 +01002995err_port_speed_by_width_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002996err_port_system_port_mapping_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002997err_dev_addr_init:
Ido Schimmel3247ff22016-09-08 08:16:02 +02002998 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2999err_port_swid_set:
Ido Schimmel2e915e02017-06-08 08:47:45 +02003000 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
Ido Schimmel5b153852017-06-08 08:47:44 +02003001err_port_module_map:
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02003002 kfree(mlxsw_sp_port->hw_stats.cache);
3003err_alloc_hw_stats:
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003004 kfree(mlxsw_sp_port->sample);
3005err_alloc_sample:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003006 free_percpu(mlxsw_sp_port->pcpu_stats);
3007err_alloc_stats:
3008 free_netdev(dev);
Ido Schimmel5b153852017-06-08 08:47:44 +02003009err_alloc_etherdev:
Jiri Pirko67963a32016-10-28 21:35:55 +02003010 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3011 return err;
3012}
3013
Ido Schimmel5b153852017-06-08 08:47:44 +02003014static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003015{
3016 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3017
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02003018 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
Jiri Pirko67963a32016-10-28 21:35:55 +02003019 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003020 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
Ido Schimmel2f258442016-08-17 16:39:31 +02003021 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02003022 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02003023 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
Ido Schimmela1107482017-05-26 08:37:39 +02003024 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02003025 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmel3e9b27b2016-02-26 17:32:28 +01003026 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
Ido Schimmel2e915e02017-06-08 08:47:45 +02003027 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02003028 kfree(mlxsw_sp_port->hw_stats.cache);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003029 kfree(mlxsw_sp_port->sample);
Yotam Gigi136f1442017-01-09 11:25:47 +01003030 free_percpu(mlxsw_sp_port->pcpu_stats);
Ido Schimmel31a08a52017-05-26 08:37:26 +02003031 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003032 free_netdev(mlxsw_sp_port->dev);
Jiri Pirko67963a32016-10-28 21:35:55 +02003033 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3034}
3035
Jiri Pirkof83e2102016-10-28 21:35:49 +02003036static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3037{
3038 return mlxsw_sp->ports[local_port] != NULL;
3039}
3040
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003041static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3042{
3043 int i;
3044
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003045 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003046 if (mlxsw_sp_port_created(mlxsw_sp, i))
3047 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003048 kfree(mlxsw_sp->port_to_module);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003049 kfree(mlxsw_sp->ports);
3050}
3051
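/* Allocate the port and port-to-module arrays and create a netdev for
 * every local port that has lanes assigned to it (non-zero width).
 */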
3052static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3053{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003054 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
Ido Schimmeld664b412016-06-09 09:51:40 +02003055 u8 module, width, lane;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003056 size_t alloc_size;
3057 int i;
3058 int err;
3059
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003060 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003061 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3062 if (!mlxsw_sp->ports)
3063 return -ENOMEM;
3064
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003065 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
3066 if (!mlxsw_sp->port_to_module) {
3067 err = -ENOMEM;
3068 goto err_port_to_module_alloc;
3069 }
3070
3071 for (i = 1; i < max_ports; i++) {
Ido Schimmel558c2d52016-02-26 17:32:29 +01003072 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
Ido Schimmeld664b412016-06-09 09:51:40 +02003073 &width, &lane);
Ido Schimmel558c2d52016-02-26 17:32:29 +01003074 if (err)
3075 goto err_port_module_info_get;
3076 if (!width)
3077 continue;
3078 mlxsw_sp->port_to_module[i] = module;
Jiri Pirko67963a32016-10-28 21:35:55 +02003079 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3080 module, width, lane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003081 if (err)
3082 goto err_port_create;
3083 }
3084 return 0;
3085
3086err_port_create:
Ido Schimmel558c2d52016-02-26 17:32:29 +01003087err_port_module_info_get:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003088 for (i--; i >= 1; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003089 if (mlxsw_sp_port_created(mlxsw_sp, i))
3090 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003091 kfree(mlxsw_sp->port_to_module);
3092err_port_to_module_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003093 kfree(mlxsw_sp->ports);
3094 return err;
3095}
3096
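/* Local ports are grouped in clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * ports; return the first (base) local port of the cluster that contains
 * local_port.
 */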
Ido Schimmel18f1e702016-02-26 17:32:31 +01003097static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3098{
3099 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3100
3101 return local_port - offset;
3102}
3103
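/* Create the split ports: each of the 'count' new ports gets an equal
 * share of the module width and starts at lane i * width.
 */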
Ido Schimmelbe945352016-06-09 09:51:39 +02003104static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3105 u8 module, unsigned int count)
3106{
3107 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3108 int err, i;
3109
3110 for (i = 0; i < count; i++) {
Ido Schimmelbe945352016-06-09 09:51:39 +02003111 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
Ido Schimmeld664b412016-06-09 09:51:40 +02003112 module, width, i * width);
Ido Schimmelbe945352016-06-09 09:51:39 +02003113 if (err)
3114 goto err_port_create;
3115 }
3116
3117 return 0;
3118
3119err_port_create:
3120 for (i--; i >= 0; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003121 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3122 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmelbe945352016-06-09 09:51:39 +02003123 return err;
3124}
3125
3126static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3127 u8 base_port, unsigned int count)
3128{
3129 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3130 int i;
3131
3132	/* Splitting by four means we need to re-create two ports; otherwise,
3133	 * only one.
3134	 */
3135 count = count / 2;
3136
3137 for (i = 0; i < count; i++) {
3138 local_port = base_port + i * 2;
3139 module = mlxsw_sp->port_to_module[local_port];
3140
Ido Schimmelbe945352016-06-09 09:51:39 +02003141 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
Ido Schimmeld664b412016-06-09 09:51:40 +02003142 width, 0);
Ido Schimmelbe945352016-06-09 09:51:39 +02003143 }
3144}
3145
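/* Port split handler (wired up through the driver's .port_split
 * operation): validate the request, remove the affected ports and
 * re-create them as split ports.
 */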
Jiri Pirkob2f10572016-04-08 19:11:23 +02003146static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3147 unsigned int count)
Ido Schimmel18f1e702016-02-26 17:32:31 +01003148{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003149 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003150 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003151 u8 module, cur_width, base_port;
3152 int i;
3153 int err;
3154
3155 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3156 if (!mlxsw_sp_port) {
3157 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3158 local_port);
3159 return -EINVAL;
3160 }
3161
Ido Schimmeld664b412016-06-09 09:51:40 +02003162 module = mlxsw_sp_port->mapping.module;
3163 cur_width = mlxsw_sp_port->mapping.width;
3164
Ido Schimmel18f1e702016-02-26 17:32:31 +01003165 if (count != 2 && count != 4) {
3166 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3167 return -EINVAL;
3168 }
3169
Ido Schimmel18f1e702016-02-26 17:32:31 +01003170 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3171 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3172 return -EINVAL;
3173 }
3174
3175 /* Make sure we have enough slave (even) ports for the split. */
3176 if (count == 2) {
3177 base_port = local_port;
3178 if (mlxsw_sp->ports[base_port + 1]) {
3179 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3180 return -EINVAL;
3181 }
3182 } else {
3183 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3184 if (mlxsw_sp->ports[base_port + 1] ||
3185 mlxsw_sp->ports[base_port + 3]) {
3186 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3187 return -EINVAL;
3188 }
3189 }
3190
3191 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003192 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3193 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003194
Ido Schimmelbe945352016-06-09 09:51:39 +02003195 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
3196 if (err) {
3197 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3198 goto err_port_split_create;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003199 }
3200
3201 return 0;
3202
Ido Schimmelbe945352016-06-09 09:51:39 +02003203err_port_split_create:
3204 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003205 return err;
3206}
3207
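/* Reverse of mlxsw_sp_port_split(): remove the split ports and re-create
 * the original, unsplit ones.
 */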
Jiri Pirkob2f10572016-04-08 19:11:23 +02003208static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
Ido Schimmel18f1e702016-02-26 17:32:31 +01003209{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003210 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003211 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmeld664b412016-06-09 09:51:40 +02003212 u8 cur_width, base_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003213 unsigned int count;
3214 int i;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003215
3216 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3217 if (!mlxsw_sp_port) {
3218 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3219 local_port);
3220 return -EINVAL;
3221 }
3222
3223 if (!mlxsw_sp_port->split) {
3224 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
3225 return -EINVAL;
3226 }
3227
Ido Schimmeld664b412016-06-09 09:51:40 +02003228 cur_width = mlxsw_sp_port->mapping.width;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003229 count = cur_width == 1 ? 4 : 2;
3230
3231 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3232
3233 /* Determine which ports to remove. */
3234 if (count == 2 && local_port >= base_port + 2)
3235 base_port = base_port + 2;
3236
3237 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003238 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3239 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003240
Ido Schimmelbe945352016-06-09 09:51:39 +02003241 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003242
3243 return 0;
3244}
3245
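/* PUDE (port up/down event) handler: reflect the reported operational
 * state in the netdev carrier state.
 */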
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003246static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3247 char *pude_pl, void *priv)
3248{
3249 struct mlxsw_sp *mlxsw_sp = priv;
3250 struct mlxsw_sp_port *mlxsw_sp_port;
3251 enum mlxsw_reg_pude_oper_status status;
3252 u8 local_port;
3253
3254 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3255 mlxsw_sp_port = mlxsw_sp->ports[local_port];
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003256 if (!mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003257 return;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003258
3259 status = mlxsw_reg_pude_oper_status_get(pude_pl);
3260 if (status == MLXSW_PORT_OPER_STATUS_UP) {
3261 netdev_info(mlxsw_sp_port->dev, "link up\n");
3262 netif_carrier_on(mlxsw_sp_port->dev);
3263 } else {
3264 netdev_info(mlxsw_sp_port->dev, "link down\n");
3265 netif_carrier_off(mlxsw_sp_port->dev);
3266 }
3267}
3268
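/* Default handler for packets trapped to the CPU: update the per-CPU RX
 * statistics and inject the packet into the network stack.
 */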
Nogah Frankel14eeda92016-11-25 10:33:32 +01003269static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3270 u8 local_port, void *priv)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003271{
3272 struct mlxsw_sp *mlxsw_sp = priv;
3273 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3274 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3275
3276 if (unlikely(!mlxsw_sp_port)) {
3277 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3278 local_port);
3279 return;
3280 }
3281
3282 skb->dev = mlxsw_sp_port->dev;
3283
3284 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3285 u64_stats_update_begin(&pcpu_stats->syncp);
3286 pcpu_stats->rx_packets++;
3287 pcpu_stats->rx_bytes += skb->len;
3288 u64_stats_update_end(&pcpu_stats->syncp);
3289
3290 skb->protocol = eth_type_trans(skb, skb->dev);
3291 netif_receive_skb(skb);
3292}
3293
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02003294static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3295 void *priv)
3296{
3297 skb->offload_fwd_mark = 1;
Nogah Frankel14eeda92016-11-25 10:33:32 +01003298 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02003299}
3300
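/* Handler for sampled packets: pass them to the psample module, truncated
 * to the configured size if truncation is enabled, and consume the skb.
 */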
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003301static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3302 void *priv)
3303{
3304 struct mlxsw_sp *mlxsw_sp = priv;
3305 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3306 struct psample_group *psample_group;
3307 u32 size;
3308
3309 if (unlikely(!mlxsw_sp_port)) {
3310 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3311 local_port);
3312 goto out;
3313 }
3314 if (unlikely(!mlxsw_sp_port->sample)) {
3315 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3316 local_port);
3317 goto out;
3318 }
3319
3320 size = mlxsw_sp_port->sample->truncate ?
3321 mlxsw_sp_port->sample->trunc_size : skb->len;
3322
3323 rcu_read_lock();
3324 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
3325 if (!psample_group)
3326 goto out_unlock;
3327 psample_sample_packet(psample_group, skb, size,
3328 mlxsw_sp_port->dev->ifindex, 0,
3329 mlxsw_sp_port->sample->rate);
3330out_unlock:
3331 rcu_read_unlock();
3332out:
3333 consume_skb(skb);
3334}
3335
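/* Listener declaration helpers: the _MARK variants set
 * skb->offload_fwd_mark on received packets, the _NO_MARK variants do not.
 */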
Nogah Frankel117b0da2016-11-25 10:33:44 +01003336#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel0fb78a42016-11-25 10:33:39 +01003337 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01003338 _is_ctrl, SP_##_trap_group, DISCARD)
Ido Schimmel93393b32016-08-25 18:42:38 +02003339
Nogah Frankel117b0da2016-11-25 10:33:44 +01003340#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel14eeda92016-11-25 10:33:32 +01003341 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01003342 _is_ctrl, SP_##_trap_group, DISCARD)
3343
3344#define MLXSW_SP_EVENTL(_func, _trap_id) \
3345 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
Nogah Frankel14eeda92016-11-25 10:33:32 +01003346
Nogah Frankel45449132016-11-25 10:33:35 +01003347static const struct mlxsw_listener mlxsw_sp_listener[] = {
3348 /* Events */
Nogah Frankel117b0da2016-11-25 10:33:44 +01003349 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
Nogah Frankelee4a60d2016-11-25 10:33:29 +01003350 /* L2 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01003351 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
3352 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
3353 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
3354 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
3355 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
3356 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
3357 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
3358 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
3359 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
3360 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
3361 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
Jiri Pirko9d41acc2017-04-18 16:55:38 +02003362 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
Arkadi Sharshevsky588823f2017-07-17 14:15:31 +02003363 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
3364 false),
3365 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3366 false),
3367 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
3368 false),
3369 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3370 false),
Ido Schimmel93393b32016-08-25 18:42:38 +02003371 /* L3 traps */
Ido Schimmel0fcc4842017-07-17 14:15:29 +02003372 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3373 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3374 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
Ido Schimmel0fcc4842017-07-17 14:15:29 +02003375 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003376 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
3377 false),
3378 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
3379 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
3380 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
3381 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
3382 false),
3383 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
3384 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
3385 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
Ido Schimmel0fcc4842017-07-17 14:15:29 +02003386 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003387 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
3388 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
3389 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3390 false),
3391 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3392 false),
3393 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3394 false),
3395 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3396 false),
3397 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
3398 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
3399 false),
3400 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
3401 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
Ido Schimmel7607dd32017-07-17 14:15:30 +02003402 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003403 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003404 /* PKT Sample trap */
3405 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
Jiri Pirko0db7b382017-06-06 14:12:05 +02003406 false, SP_IP2ME, DISCARD),
3407 /* ACL trap */
3408 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003409};
3410
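/* Configure a policer (rate limiter) for each CPU trap group via the QPCR
 * register, so trapped traffic cannot overwhelm the CPU.
 */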
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003411static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3412{
3413 char qpcr_pl[MLXSW_REG_QPCR_LEN];
3414 enum mlxsw_reg_qpcr_ir_units ir_units;
3415 int max_cpu_policers;
3416 bool is_bytes;
3417 u8 burst_size;
3418 u32 rate;
3419 int i, err;
3420
3421 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3422 return -EIO;
3423
3424 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3425
3426 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3427 for (i = 0; i < max_cpu_policers; i++) {
3428 is_bytes = false;
3429 switch (i) {
3430 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3431 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3432 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3433 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3434 rate = 128;
3435 burst_size = 7;
3436 break;
3437 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
Arkadi Sharshevsky588823f2017-07-17 14:15:31 +02003438 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003439 rate = 16 * 1024;
3440 burst_size = 10;
3441 break;
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003442 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003443 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3444 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003445 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003446 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3447 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003448 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003449 rate = 1024;
3450 burst_size = 7;
3451 break;
3452 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3453 is_bytes = true;
3454 rate = 4 * 1024;
3455 burst_size = 4;
3456 break;
3457 default:
3458 continue;
3459 }
3460
3461 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3462 burst_size);
3463 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3464 if (err)
3465 return err;
3466 }
3467
3468 return 0;
3469}
3470
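/* Map each trap group to a priority, traffic class and policer via the
 * HTGT register.
 */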
Nogah Frankel579c82e2016-11-25 10:33:42 +01003471static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003472{
3473 char htgt_pl[MLXSW_REG_HTGT_LEN];
Nogah Frankel117b0da2016-11-25 10:33:44 +01003474 enum mlxsw_reg_htgt_trap_group i;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003475 int max_cpu_policers;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003476 int max_trap_groups;
3477 u8 priority, tc;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003478 u16 policer_id;
Nogah Frankel117b0da2016-11-25 10:33:44 +01003479 int err;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003480
3481 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3482 return -EIO;
3483
3484 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003485 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003486
3487 for (i = 0; i < max_trap_groups; i++) {
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003488 policer_id = i;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003489 switch (i) {
Nogah Frankel117b0da2016-11-25 10:33:44 +01003490 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3491 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3492 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3493 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3494 priority = 5;
3495 tc = 5;
3496 break;
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003497 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003498 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3499 priority = 4;
3500 tc = 4;
3501 break;
3502 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3503 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
Arkadi Sharshevsky588823f2017-07-17 14:15:31 +02003504 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003505 priority = 3;
3506 tc = 3;
3507 break;
3508 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003509 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003510 priority = 2;
3511 tc = 2;
3512 break;
Arkadi Sharshevsky8d548142017-07-18 10:10:11 +02003513 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
Nogah Frankel117b0da2016-11-25 10:33:44 +01003514 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3515 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3516 priority = 1;
3517 tc = 1;
3518 break;
3519 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
Nogah Frankel579c82e2016-11-25 10:33:42 +01003520 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3521 tc = MLXSW_REG_HTGT_DEFAULT_TC;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003522 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003523 break;
3524 default:
3525 continue;
3526 }
Nogah Frankel117b0da2016-11-25 10:33:44 +01003527
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003528 if (max_cpu_policers <= policer_id &&
3529 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3530 return -EIO;
3531
3532 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003533 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3534 if (err)
3535 return err;
3536 }
3537
3538 return 0;
3539}
3540
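/* Set up the CPU policers and trap groups, then register all listeners. */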
3541static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3542{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003543 int i;
3544 int err;
3545
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003546 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3547 if (err)
3548 return err;
3549
Nogah Frankel579c82e2016-11-25 10:33:42 +01003550 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003551 if (err)
3552 return err;
3553
Nogah Frankel45449132016-11-25 10:33:35 +01003554 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003555 err = mlxsw_core_trap_register(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003556 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003557 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003558 if (err)
Nogah Frankel45449132016-11-25 10:33:35 +01003559 goto err_listener_register;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003560
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003561 }
3562 return 0;
3563
Nogah Frankel45449132016-11-25 10:33:35 +01003564err_listener_register:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003565 for (i--; i >= 0; i--) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003566 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003567 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003568 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003569 }
3570 return err;
3571}
3572
3573static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3574{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003575 int i;
3576
Nogah Frankel45449132016-11-25 10:33:35 +01003577 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003578 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003579 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003580 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003581 }
3582}
3583
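/* Configure the LAG hash fields used for load balancing and allocate the
 * LAG tracking array according to the reported device limits.
 */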
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003584static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3585{
3586 char slcr_pl[MLXSW_REG_SLCR_LEN];
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003587 int err;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003588
3589 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3590 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3591 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3592 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3593 MLXSW_REG_SLCR_LAG_HASH_SIP |
3594 MLXSW_REG_SLCR_LAG_HASH_DIP |
3595 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3596 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3597 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003598 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3599 if (err)
3600 return err;
3601
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003602 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3603 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003604 return -EIO;
3605
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003606 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003607 sizeof(struct mlxsw_sp_upper),
3608 GFP_KERNEL);
3609 if (!mlxsw_sp->lags)
3610 return -ENOMEM;
3611
3612 return 0;
3613}
3614
3615static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3616{
3617 kfree(mlxsw_sp->lags);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003618}
3619
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003620static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3621{
3622 char htgt_pl[MLXSW_REG_HTGT_LEN];
3623
Nogah Frankel579c82e2016-11-25 10:33:42 +01003624 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3625 MLXSW_REG_HTGT_INVALID_POLICER,
3626 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3627 MLXSW_REG_HTGT_DEFAULT_TC);
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003628 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3629}
3630
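/* Main init flow: validate the firmware revision, then bring up the common
 * blocks (FIDs, traps, buffers, LAG, switchdev, router, SPAN, ACL, counter
 * pool, dpipe) before creating the ports.
 */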
Jiri Pirkob2f10572016-04-08 19:11:23 +02003631static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003632 const struct mlxsw_bus_info *mlxsw_bus_info)
3633{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003634 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003635 int err;
3636
3637 mlxsw_sp->core = mlxsw_core;
3638 mlxsw_sp->bus_info = mlxsw_bus_info;
3639
Yotam Gigi6b742192017-05-23 21:56:29 +02003640 err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
3641 if (err) {
3642 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
3643 return err;
3644 }
3645
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003646 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3647 if (err) {
3648 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3649 return err;
3650 }
3651
Ido Schimmela1107482017-05-26 08:37:39 +02003652 err = mlxsw_sp_fids_init(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003653 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02003654 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
Nogah Frankel45449132016-11-25 10:33:35 +01003655 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003656 }
3657
Ido Schimmela1107482017-05-26 08:37:39 +02003658 err = mlxsw_sp_traps_init(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003659 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02003660 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3661 goto err_traps_init;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003662 }
3663
3664 err = mlxsw_sp_buffers_init(mlxsw_sp);
3665 if (err) {
3666 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3667 goto err_buffers_init;
3668 }
3669
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003670 err = mlxsw_sp_lag_init(mlxsw_sp);
3671 if (err) {
3672 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3673 goto err_lag_init;
3674 }
3675
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003676 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3677 if (err) {
3678 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3679 goto err_switchdev_init;
3680 }
3681
Ido Schimmel464dce12016-07-02 11:00:15 +02003682 err = mlxsw_sp_router_init(mlxsw_sp);
3683 if (err) {
3684 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3685 goto err_router_init;
3686 }
3687
Yotam Gigi763b4b72016-07-21 12:03:17 +02003688 err = mlxsw_sp_span_init(mlxsw_sp);
3689 if (err) {
3690 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3691 goto err_span_init;
3692 }
3693
Jiri Pirko22a67762017-02-03 10:29:07 +01003694 err = mlxsw_sp_acl_init(mlxsw_sp);
3695 if (err) {
3696 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3697 goto err_acl_init;
3698 }
3699
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003700 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3701 if (err) {
3702 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3703 goto err_counter_pool_init;
3704 }
3705
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003706 err = mlxsw_sp_dpipe_init(mlxsw_sp);
3707 if (err) {
3708 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3709 goto err_dpipe_init;
3710 }
3711
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003712 err = mlxsw_sp_ports_create(mlxsw_sp);
3713 if (err) {
3714 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3715 goto err_ports_create;
3716 }
3717
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003718 return 0;
3719
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003720err_ports_create:
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003721 mlxsw_sp_dpipe_fini(mlxsw_sp);
3722err_dpipe_init:
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003723 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3724err_counter_pool_init:
Jiri Pirko22a67762017-02-03 10:29:07 +01003725 mlxsw_sp_acl_fini(mlxsw_sp);
3726err_acl_init:
Yotam Gigi763b4b72016-07-21 12:03:17 +02003727 mlxsw_sp_span_fini(mlxsw_sp);
3728err_span_init:
Ido Schimmel464dce12016-07-02 11:00:15 +02003729 mlxsw_sp_router_fini(mlxsw_sp);
3730err_router_init:
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003731 mlxsw_sp_switchdev_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003732err_switchdev_init:
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003733 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003734err_lag_init:
Jiri Pirko0f433fa2016-04-14 18:19:24 +02003735 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003736err_buffers_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003737 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmela1107482017-05-26 08:37:39 +02003738err_traps_init:
3739 mlxsw_sp_fids_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003740 return err;
3741}
3742
Jiri Pirkob2f10572016-04-08 19:11:23 +02003743static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003744{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003745 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003746
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003747 mlxsw_sp_ports_remove(mlxsw_sp);
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003748 mlxsw_sp_dpipe_fini(mlxsw_sp);
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003749 mlxsw_sp_counter_pool_fini(mlxsw_sp);
Jiri Pirko22a67762017-02-03 10:29:07 +01003750 mlxsw_sp_acl_fini(mlxsw_sp);
Yotam Gigi763b4b72016-07-21 12:03:17 +02003751 mlxsw_sp_span_fini(mlxsw_sp);
Ido Schimmel464dce12016-07-02 11:00:15 +02003752 mlxsw_sp_router_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003753 mlxsw_sp_switchdev_fini(mlxsw_sp);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003754 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko5113bfd2016-05-06 22:20:59 +02003755 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003756 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmela1107482017-05-26 08:37:39 +02003757 mlxsw_sp_fids_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003758}
3759
3760static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3761 .used_max_vepa_channels = 1,
3762 .max_vepa_channels = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003763 .used_max_mid = 1,
Elad Raz53ae6282016-01-10 21:06:26 +01003764 .max_mid = MLXSW_SP_MID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003765 .used_max_pgt = 1,
3766 .max_pgt = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003767 .used_flood_tables = 1,
3768 .used_flood_mode = 1,
3769 .flood_mode = 3,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003770 .max_fid_offset_flood_tables = 3,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003771 .fid_offset_flood_table_size = VLAN_N_VID - 1,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003772 .max_fid_flood_tables = 3,
Ido Schimmela1107482017-05-26 08:37:39 +02003773 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003774 .used_max_ib_mc = 1,
3775 .max_ib_mc = 0,
3776 .used_max_pkey = 1,
3777 .max_pkey = 0,
Nogah Frankel403547d2016-09-20 11:16:52 +02003778 .used_kvd_split_data = 1,
3779 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3780 .kvd_hash_single_parts = 2,
3781 .kvd_hash_double_parts = 1,
Jiri Pirkoc6022422016-07-05 11:27:46 +02003782 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003783 .swid_config = {
3784 {
3785 .used_type = 1,
3786 .type = MLXSW_PORT_SWID_TYPE_ETH,
3787 }
3788 },
Nogah Frankel57d316b2016-07-21 12:03:09 +02003789 .resource_query_enable = 1,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003790};
3791
3792static struct mlxsw_driver mlxsw_sp_driver = {
Jiri Pirko1d20d232016-10-27 15:12:59 +02003793 .kind = mlxsw_sp_driver_name,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003794 .priv_size = sizeof(struct mlxsw_sp),
3795 .init = mlxsw_sp_init,
3796 .fini = mlxsw_sp_fini,
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003797 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003798 .port_split = mlxsw_sp_port_split,
3799 .port_unsplit = mlxsw_sp_port_unsplit,
3800 .sb_pool_get = mlxsw_sp_sb_pool_get,
3801 .sb_pool_set = mlxsw_sp_sb_pool_set,
3802 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3803 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3804 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3805 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3806 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3807 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3808 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3809 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3810 .txhdr_construct = mlxsw_sp_txhdr_construct,
3811 .txhdr_len = MLXSW_TXHDR_LEN,
3812 .profile = &mlxsw_sp_config_profile,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003813};
3814
Jiri Pirko22a67762017-02-03 10:29:07 +01003815bool mlxsw_sp_port_dev_check(const struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003816{
3817 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3818}
3819
Jiri Pirko1182e532017-03-06 21:25:20 +01003820static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
David Aherndd823642016-10-17 19:15:49 -07003821{
Jiri Pirko1182e532017-03-06 21:25:20 +01003822 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
David Aherndd823642016-10-17 19:15:49 -07003823 int ret = 0;
3824
3825 if (mlxsw_sp_port_dev_check(lower_dev)) {
Jiri Pirko1182e532017-03-06 21:25:20 +01003826 *p_mlxsw_sp_port = netdev_priv(lower_dev);
David Aherndd823642016-10-17 19:15:49 -07003827 ret = 1;
3828 }
3829
3830 return ret;
3831}
3832
Ido Schimmelc57529e2017-05-26 08:37:31 +02003833struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003834{
Jiri Pirko1182e532017-03-06 21:25:20 +01003835 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003836
3837 if (mlxsw_sp_port_dev_check(dev))
3838 return netdev_priv(dev);
3839
Jiri Pirko1182e532017-03-06 21:25:20 +01003840 mlxsw_sp_port = NULL;
3841 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003842
Jiri Pirko1182e532017-03-06 21:25:20 +01003843 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003844}
3845
Ido Schimmel4724ba562017-03-10 08:53:39 +01003846struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003847{
3848 struct mlxsw_sp_port *mlxsw_sp_port;
3849
3850 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3851 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3852}
3853
Arkadi Sharshevskyaf0613782017-06-08 08:44:20 +02003854struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003855{
Jiri Pirko1182e532017-03-06 21:25:20 +01003856 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003857
3858 if (mlxsw_sp_port_dev_check(dev))
3859 return netdev_priv(dev);
3860
Jiri Pirko1182e532017-03-06 21:25:20 +01003861 mlxsw_sp_port = NULL;
3862 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3863 &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003864
Jiri Pirko1182e532017-03-06 21:25:20 +01003865 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003866}
3867
3868struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3869{
3870 struct mlxsw_sp_port *mlxsw_sp_port;
3871
3872 rcu_read_lock();
3873 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3874 if (mlxsw_sp_port)
3875 dev_hold(mlxsw_sp_port->dev);
3876 rcu_read_unlock();
3877 return mlxsw_sp_port;
3878}
3879
3880void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3881{
3882 dev_put(mlxsw_sp_port->dev);
3883}
3884
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003885static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003886{
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003887 char sldr_pl[MLXSW_REG_SLDR_LEN];
3888
3889 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3890 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3891}
3892
3893static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3894{
3895 char sldr_pl[MLXSW_REG_SLDR_LEN];
3896
3897 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3898 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3899}
3900
3901static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3902 u16 lag_id, u8 port_index)
3903{
3904 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3905 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3906
3907 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3908 lag_id, port_index);
3909 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3910}
3911
3912static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3913 u16 lag_id)
3914{
3915 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3916 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3917
3918 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3919 lag_id);
3920 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3921}
3922
3923static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3924 u16 lag_id)
3925{
3926 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3927 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3928
3929 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3930 lag_id);
3931 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3932}
3933
3934static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3935 u16 lag_id)
3936{
3937 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3938 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3939
3940 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3941 lag_id);
3942 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3943}
3944
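/* Return the LAG ID already associated with lag_dev, or pick a free one if
 * this is the first port to join that LAG.
 */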
3945static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3946 struct net_device *lag_dev,
3947 u16 *p_lag_id)
3948{
3949 struct mlxsw_sp_upper *lag;
3950 int free_lag_id = -1;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003951 u64 max_lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003952 int i;
3953
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003954 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3955 for (i = 0; i < max_lag; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003956 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3957 if (lag->ref_count) {
3958 if (lag->dev == lag_dev) {
3959 *p_lag_id = i;
3960 return 0;
3961 }
3962 } else if (free_lag_id < 0) {
3963 free_lag_id = i;
3964 }
3965 }
3966 if (free_lag_id < 0)
3967 return -EBUSY;
3968 *p_lag_id = free_lag_id;
3969 return 0;
3970}
3971
3972static bool
3973mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3974 struct net_device *lag_dev,
3975 struct netdev_lag_upper_info *lag_upper_info)
3976{
3977 u16 lag_id;
3978
3979 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3980 return false;
3981 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3982 return false;
3983 return true;
3984}
3985
3986static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3987 u16 lag_id, u8 *p_port_index)
3988{
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003989 u64 max_lag_members;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003990 int i;
3991
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003992 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3993 MAX_LAG_MEMBERS);
3994 for (i = 0; i < max_lag_members; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003995 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3996 *p_port_index = i;
3997 return 0;
3998 }
3999 }
4000 return -EBUSY;
4001}
4002
4003static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4004 struct net_device *lag_dev)
4005{
4006 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmelc57529e2017-05-26 08:37:31 +02004007 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004008 struct mlxsw_sp_upper *lag;
4009 u16 lag_id;
4010 u8 port_index;
4011 int err;
4012
4013 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4014 if (err)
4015 return err;
4016 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4017 if (!lag->ref_count) {
4018 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4019 if (err)
4020 return err;
4021 lag->dev = lag_dev;
4022 }
4023
4024 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4025 if (err)
4026 return err;
4027 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4028 if (err)
4029 goto err_col_port_add;
4030 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
4031 if (err)
4032 goto err_col_port_enable;
4033
4034 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4035 mlxsw_sp_port->local_port);
4036 mlxsw_sp_port->lag_id = lag_id;
4037 mlxsw_sp_port->lagged = 1;
4038 lag->ref_count++;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02004039
Ido Schimmelc57529e2017-05-26 08:37:31 +02004040 /* Port is no longer usable as a router interface */
4041 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
4042 if (mlxsw_sp_port_vlan->fid)
Ido Schimmela1107482017-05-26 08:37:39 +02004043 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel86bf95b2016-07-02 11:00:11 +02004044
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004045 return 0;
4046
Ido Schimmel51554db2016-05-06 22:18:39 +02004047err_col_port_enable:
4048 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004049err_col_port_add:
4050 if (!lag->ref_count)
4051 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004052 return err;
4053}
4054
Ido Schimmel82e6db02016-06-20 23:04:04 +02004055static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4056 struct net_device *lag_dev)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004057{
4058 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004059 u16 lag_id = mlxsw_sp_port->lag_id;
Ido Schimmel1c800752016-06-20 23:04:20 +02004060 struct mlxsw_sp_upper *lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004061
4062 if (!mlxsw_sp_port->lagged)
Ido Schimmel82e6db02016-06-20 23:04:04 +02004063 return;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004064 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4065 WARN_ON(lag->ref_count == 0);
4066
Ido Schimmel82e6db02016-06-20 23:04:04 +02004067 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
4068 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004069
Ido Schimmelc57529e2017-05-26 08:37:31 +02004070 /* Any VLANs configured on the port are no longer valid */
4071 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01004072
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02004073 if (lag->ref_count == 1)
Ido Schimmel82e6db02016-06-20 23:04:04 +02004074 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004075
4076 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4077 mlxsw_sp_port->local_port);
4078 mlxsw_sp_port->lagged = 0;
4079 lag->ref_count--;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02004080
Ido Schimmelc57529e2017-05-26 08:37:31 +02004081 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
4082 /* Make sure untagged frames are allowed to ingress */
4083 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004084}
4085
Jiri Pirko74581202015-12-03 12:12:30 +01004086static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4087 u16 lag_id)
4088{
4089 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4090 char sldr_pl[MLXSW_REG_SLDR_LEN];
4091
4092 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4093 mlxsw_sp_port->local_port);
4094 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4095}
4096
4097static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4098 u16 lag_id)
4099{
4100 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4101 char sldr_pl[MLXSW_REG_SLDR_LEN];
4102
4103 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4104 mlxsw_sp_port->local_port);
4105 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4106}
4107
4108static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
4109 bool lag_tx_enabled)
4110{
4111 if (lag_tx_enabled)
4112 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
4113 mlxsw_sp_port->lag_id);
4114 else
4115 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4116 mlxsw_sp_port->lag_id);
4117}
4118
4119static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4120 struct netdev_lag_lower_state_info *info)
4121{
4122 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
4123}
4124
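/* Set the STP state of all VLANs on the port to either forwarding or
 * discarding using the SPMS register.
 */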
Jiri Pirko2b94e582017-04-18 16:55:37 +02004125static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4126 bool enable)
4127{
4128 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4129 enum mlxsw_reg_spms_state spms_state;
4130 char *spms_pl;
4131 u16 vid;
4132 int err;
4133
4134 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4135 MLXSW_REG_SPMS_STATE_DISCARDING;
4136
4137 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4138 if (!spms_pl)
4139 return -ENOMEM;
4140 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4141
4142 for (vid = 0; vid < VLAN_N_VID; vid++)
4143 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4144
4145 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4146 kfree(spms_pl);
4147 return err;
4148}
4149
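/* Prepare a port for enslavement to an OVS master: switch it to virtual
 * port mode, set its STP state to forwarding and add it to VLANs 2-4094.
 */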
4150static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4151{
4152 int err;
4153
Ido Schimmel4aafc362017-05-26 08:37:25 +02004154 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004155 if (err)
4156 return err;
Ido Schimmel4aafc362017-05-26 08:37:25 +02004157 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4158 if (err)
4159 goto err_port_stp_set;
Jiri Pirko2b94e582017-04-18 16:55:37 +02004160 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4161 true, false);
4162 if (err)
4163 goto err_port_vlan_set;
4164 return 0;
4165
4166err_port_vlan_set:
4167 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
Ido Schimmel4aafc362017-05-26 08:37:25 +02004168err_port_stp_set:
4169 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004170 return err;
4171}
4172
4173static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4174{
4175 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4176 false, false);
4177 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
Ido Schimmel4aafc362017-05-26 08:37:25 +02004178 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004179}
4180
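/* Handle upper device events for a port netdev: validate the requested
 * topology on PRECHANGEUPPER and join/leave the bridge, LAG or OVS master
 * on CHANGEUPPER.
 */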
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}

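/* On CHANGELOWERSTATE of a LAG member, propagate the new lower state
 * (e.g. whether the member may transmit) via mlxsw_sp_port_lag_changed(),
 * presumably so the ASIC's LAG matches the software bond/team state.
 */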
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

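/* Dispatch a netdevice notification targeting a single physical port. */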
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

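/* A notification for a LAG device is replayed against every mlxsw_sp port
 * enslaved to it; the first error aborts the walk.
 */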
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

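/* Handle (PRE)CHANGEUPPER notifications for a VLAN device on top of a
 * physical port: only a bridge master is an acceptable upper, and linking
 * joins the bridge with vlan_dev acting as the bridge port.
 */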
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct net_device *upper_dev;
	int err = 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

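/* Same as above, but for a VLAN device on top of a LAG: replay the event
 * to every mlxsw_sp port member of the LAG.
 */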
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

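/* Notifications for a VLAN device are handled according to its real
 * device: a physical port or a LAG. Anything else is ignored.
 */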
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);

	return 0;
}

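/* (PRE)CHANGEUPPER towards an L3 master device (VRF) is dispatched to
 * mlxsw_sp_netdevice_vrf_event() instead of the port/LAG/VLAN handlers.
 */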
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

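/* Top-level netdevice notifier: address and MTU changes go to the router
 * port handler, VRF (un)linking to the VRF handler, and everything else is
 * dispatched by device type (port, LAG or VLAN). The error, if any, is
 * translated back into notifier language via notifier_from_errno().
 */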
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

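/* The notifier blocks below are registered in mlxsw_sp_module_init() and
 * unregistered, in reverse order, in mlxsw_sp_module_exit().
 */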
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

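/* The notifiers are registered before the core and PCI drivers, presumably
 * so that they are already in place by the time the first ports are
 * created; errors unwind in reverse order of registration.
 */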
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

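/* Mirror image of mlxsw_sp_module_init(), tearing down in reverse order. */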
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);