/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "../mlxfw/mlxfw.h"

#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1420
#define MLXSW_FWREV_SUBMINOR 122

static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

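/* Note: each MLXSW_ITEM32(tx, hdr, <field>, ...) definition below generates
 * mlxsw_tx_hdr_<field>_set()/_get() accessors (via the MLXSW_ITEM32() macro
 * in item.h) that mlxsw_sp_txhdr_construct() uses to build the Tx header
 * placed in front of every transmitted packet.
 */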
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};

static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}

static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}

static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}

static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release
};

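/* mlxfw drives the firmware update state machine through the ops above:
 * component queries go through the MCQI register, the lock/update/verify/
 * activate/cancel/release steps through the MCC register and block
 * downloads through the MCDA register. mlxsw_sp_firmware_flash() simply
 * wraps the ops and the device PSID into a struct mlxfw_dev and hands it
 * to mlxfw_firmware_flash().
 */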
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}

static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
			       const struct mlxsw_fw_rev *b)
{
	if (a->major != b->major)
		return a->major > b->major;
	if (a->minor != b->minor)
		return a->minor > b->minor;
	return a->subminor >= b->subminor;
}

static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

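/* Flow counters are taken from the generic counter pool
 * (MLXSW_SP_COUNTER_SUB_POOL_FLOW) and cleared on allocation so that
 * users always start reading from zero.
 */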
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

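/* Build the Tx header in front of the packet using the tx_hdr_* fields
 * defined above. Packets sent by the driver itself are control packets,
 * so they use the control TClass and are directed to a specific local
 * port instead of going through the switch forwarding pipeline.
 */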
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

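/* The egress mirroring (SPAN) buffer is sized to hold roughly 2.5 MTUs
 * worth of data, converted to cell units, plus one extra cell of slack.
 */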
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

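/* Worst-case PFC delay allowance: the peer's advertised delay (given in
 * bit times, hence the division by BITS_PER_BYTE) converted to cells and
 * scaled by the cell factor above, plus room for roughly one more
 * MTU-sized packet that may already be in flight.
 */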
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

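/* Configure the port's headroom (PBMC) buffers: each priority group that
 * has at least one priority mapped to it gets a buffer sized roughly for
 * the MTU plus, for lossless (PFC or pause enabled) groups, the delay
 * allowance computed above; groups with neither PFC nor pause stay lossy.
 */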
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}

static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

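/* mlxsw_sp_port_vlan_get() returns the existing {port, VID} object or
 * creates one on demand; mlxsw_sp_port_vlan_put() detaches the object from
 * any bridge port or router FID it is still bound to before destroying it.
 */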
1435struct mlxsw_sp_port_vlan *
1436mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1437{
1438 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1439
1440 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1441 if (mlxsw_sp_port_vlan)
1442 return mlxsw_sp_port_vlan;
1443
1444 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1445}
1446
1447void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1448{
Ido Schimmela1107482017-05-26 08:37:39 +02001449 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1450
Ido Schimmelc57529e2017-05-26 08:37:31 +02001451 if (mlxsw_sp_port_vlan->bridge_port)
1452 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
Ido Schimmela1107482017-05-26 08:37:39 +02001453 else if (fid)
1454 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001455
1456 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001457}
1458
Ido Schimmel05978482016-08-17 16:39:30 +02001459static int mlxsw_sp_port_add_vid(struct net_device *dev,
1460 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001461{
1462 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001463
1464 /* VLAN 0 is added to HW filter when device goes up, but it is
1465 * reserved in our case, so simply return.
1466 */
1467 if (!vid)
1468 return 0;
1469
Ido Schimmelc57529e2017-05-26 08:37:31 +02001470 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001471}
1472
Ido Schimmel32d863f2016-07-02 11:00:10 +02001473static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1474 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001475{
1476 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001477 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001478
1479 /* VLAN 0 is removed from HW filter when device goes down, but
1480 * it is reserved in our case, so simply return.
1481 */
1482 if (!vid)
1483 return 0;
1484
Ido Schimmel31a08a52017-05-26 08:37:26 +02001485 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmelc57529e2017-05-26 08:37:31 +02001486 if (!mlxsw_sp_port_vlan)
Ido Schimmel31a08a52017-05-26 08:37:26 +02001487 return 0;
Ido Schimmelc57529e2017-05-26 08:37:31 +02001488 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
Ido Schimmel31a08a52017-05-26 08:37:26 +02001489
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001490 return 0;
1491}
1492
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001493static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1494 size_t len)
1495{
1496 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmeld664b412016-06-09 09:51:40 +02001497 u8 module = mlxsw_sp_port->mapping.module;
1498 u8 width = mlxsw_sp_port->mapping.width;
1499 u8 lane = mlxsw_sp_port->mapping.lane;
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001500 int err;
1501
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001502 if (!mlxsw_sp_port->split)
1503 err = snprintf(name, len, "p%d", module + 1);
1504 else
1505 err = snprintf(name, len, "p%ds%d", module + 1,
1506 lane / width);
1507
1508 if (err >= len)
1509 return -EINVAL;
1510
1511 return 0;
1512}
1513
Yotam Gigi763b4b72016-07-21 12:03:17 +02001514static struct mlxsw_sp_port_mall_tc_entry *
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001515mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
 1516					 unsigned long cookie)
{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001517 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1518
1519 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1520 if (mall_tc_entry->cookie == cookie)
1521 return mall_tc_entry;
1522
1523 return NULL;
1524}
1525
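/* Resolve the mirred action's target netdev to a local Spectrum port and set
 * up a SPAN (mirroring) session towards it. Mirroring to a netdev that is not
 * a Spectrum port is rejected.
 */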
1526static int
1527mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001528 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001529 const struct tc_action *a,
1530 bool ingress)
1531{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001532 struct net *net = dev_net(mlxsw_sp_port->dev);
1533 enum mlxsw_sp_span_type span_type;
1534 struct mlxsw_sp_port *to_port;
1535 struct net_device *to_dev;
1536 int ifindex;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001537
1538 ifindex = tcf_mirred_ifindex(a);
1539 to_dev = __dev_get_by_index(net, ifindex);
1540 if (!to_dev) {
1541 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1542 return -EINVAL;
1543 }
1544
1545 if (!mlxsw_sp_port_dev_check(to_dev)) {
 1546		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
Yotam Gigie915ac62017-01-09 11:25:48 +01001547 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001548 }
1549 to_port = netdev_priv(to_dev);
1550
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001551 mirror->to_local_port = to_port->local_port;
1552 mirror->ingress = ingress;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001553 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001554 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1555}
Yotam Gigi763b4b72016-07-21 12:03:17 +02001556
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001557static void
1558mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1559 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1560{
1561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1562 enum mlxsw_sp_span_type span_type;
1563 struct mlxsw_sp_port *to_port;
1564
1565 to_port = mlxsw_sp->ports[mirror->to_local_port];
1566 span_type = mirror->ingress ?
1567 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1568 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001569}
1570
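/* Enable packet sampling towards the psample module. Only a single sample
 * action may be active on a port at a time, and the requested rate must not
 * exceed the maximum supported by the MPSC register.
 */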
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001571static int
1572mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1573 struct tc_cls_matchall_offload *cls,
1574 const struct tc_action *a,
1575 bool ingress)
1576{
1577 int err;
1578
1579 if (!mlxsw_sp_port->sample)
1580 return -EOPNOTSUPP;
1581 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1582 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1583 return -EEXIST;
1584 }
1585 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1586 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1587 return -EOPNOTSUPP;
1588 }
1589
1590 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1591 tcf_sample_psample_group(a));
1592 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1593 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1594 mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1595
1596 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1597 if (err)
1598 goto err_port_sample_set;
1599 return 0;
1600
1601err_port_sample_set:
1602 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1603 return err;
1604}
1605
1606static void
1607mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1608{
1609 if (!mlxsw_sp_port->sample)
1610 return;
1611
1612 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1613 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1614}
1615
Yotam Gigi763b4b72016-07-21 12:03:17 +02001616static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1617 __be16 protocol,
1618 struct tc_cls_matchall_offload *cls,
1619 bool ingress)
1620{
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001621 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001622 const struct tc_action *a;
WANG Cong22dc13c2016-08-13 22:35:00 -07001623 LIST_HEAD(actions);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001624 int err;
1625
Ido Schimmel86cb13e2016-07-25 13:12:33 +03001626 if (!tc_single_action(cls->exts)) {
Yotam Gigi763b4b72016-07-21 12:03:17 +02001627 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
Yotam Gigie915ac62017-01-09 11:25:48 +01001628 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001629 }
1630
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001631 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1632 if (!mall_tc_entry)
1633 return -ENOMEM;
1634 mall_tc_entry->cookie = cls->cookie;
Ido Schimmel86cb13e2016-07-25 13:12:33 +03001635
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001636 tcf_exts_to_list(cls->exts, &actions);
1637 a = list_first_entry(&actions, struct tc_action, list);
1638
1639 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1640 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1641
1642 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1643 mirror = &mall_tc_entry->mirror;
1644 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1645 mirror, a, ingress);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001646 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1647 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1648 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
1649 a, ingress);
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001650 } else {
1651 err = -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001652 }
1653
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001654 if (err)
1655 goto err_add_action;
1656
1657 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001658 return 0;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001659
1660err_add_action:
1661 kfree(mall_tc_entry);
1662 return err;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001663}
1664
1665static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1666 struct tc_cls_matchall_offload *cls)
1667{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001668 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001669
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001670 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1671 cls->cookie);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001672 if (!mall_tc_entry) {
1673 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1674 return;
1675 }
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001676 list_del(&mall_tc_entry->list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001677
1678 switch (mall_tc_entry->type) {
1679 case MLXSW_SP_PORT_MALL_MIRROR:
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001680 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1681 &mall_tc_entry->mirror);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001682 break;
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001683 case MLXSW_SP_PORT_MALL_SAMPLE:
1684 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1685 break;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001686 default:
1687 WARN_ON(1);
1688 }
1689
Yotam Gigi763b4b72016-07-21 12:03:17 +02001690 kfree(mall_tc_entry);
1691}
1692
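/* ndo_setup_tc entry point: dispatch matchall (mirror/sample) and flower
 * classifier offloads. Only the default chain (chain 0) is supported.
 */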
1693static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
Jiri Pirkoa5fcf8a2017-06-06 17:00:16 +02001694 u32 chain_index, __be16 proto,
1695 struct tc_to_netdev *tc)
Yotam Gigi763b4b72016-07-21 12:03:17 +02001696{
1697 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1698 bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
1699
Jiri Pirkoa5fcf8a2017-06-06 17:00:16 +02001700 if (chain_index)
1701 return -EOPNOTSUPP;
1702
Jiri Pirko7aa0f5a2017-02-03 10:29:09 +01001703 switch (tc->type) {
1704 case TC_SETUP_MATCHALL:
Yotam Gigi763b4b72016-07-21 12:03:17 +02001705 switch (tc->cls_mall->command) {
1706 case TC_CLSMATCHALL_REPLACE:
1707 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
1708 proto,
1709 tc->cls_mall,
1710 ingress);
1711 case TC_CLSMATCHALL_DESTROY:
1712 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
1713 tc->cls_mall);
1714 return 0;
1715 default:
Or Gerlitzabbdf4b2017-03-17 09:38:01 +01001716 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001717 }
Jiri Pirko7aa0f5a2017-02-03 10:29:09 +01001718 case TC_SETUP_CLSFLOWER:
1719 switch (tc->cls_flower->command) {
1720 case TC_CLSFLOWER_REPLACE:
1721 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
1722 proto, tc->cls_flower);
1723 case TC_CLSFLOWER_DESTROY:
1724 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
1725 tc->cls_flower);
1726 return 0;
Arkadi Sharshevsky7c1b8eb2017-03-11 09:42:59 +01001727 case TC_CLSFLOWER_STATS:
1728 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
1729 tc->cls_flower);
Jiri Pirko7aa0f5a2017-02-03 10:29:09 +01001730 default:
1731 return -EOPNOTSUPP;
1732 }
Yotam Gigi763b4b72016-07-21 12:03:17 +02001733 }
1734
Yotam Gigie915ac62017-01-09 11:25:48 +01001735 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001736}
1737
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001738static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1739 .ndo_open = mlxsw_sp_port_open,
1740 .ndo_stop = mlxsw_sp_port_stop,
1741 .ndo_start_xmit = mlxsw_sp_port_xmit,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001742 .ndo_setup_tc = mlxsw_sp_setup_tc,
Jiri Pirkoc5b9b512015-12-03 12:12:22 +01001743 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001744 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1745 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1746 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001747 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1748 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001749 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1750 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001751 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001752};
1753
1754static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1755 struct ethtool_drvinfo *drvinfo)
1756{
1757 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1758 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1759
1760 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1761 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1762 sizeof(drvinfo->version));
1763 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1764 "%d.%d.%d",
1765 mlxsw_sp->bus_info->fw_rev.major,
1766 mlxsw_sp->bus_info->fw_rev.minor,
1767 mlxsw_sp->bus_info->fw_rev.subminor);
1768 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1769 sizeof(drvinfo->bus_info));
1770}
1771
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001772static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1773 struct ethtool_pauseparam *pause)
1774{
1775 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1776
1777 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1778 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1779}
1780
1781static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1782 struct ethtool_pauseparam *pause)
1783{
1784 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1785
1786 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1787 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1788 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1789
1790 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1791 pfcc_pl);
1792}
1793
1794static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1795 struct ethtool_pauseparam *pause)
1796{
1797 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1798 bool pause_en = pause->tx_pause || pause->rx_pause;
1799 int err;
1800
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001801 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1802 netdev_err(dev, "PFC already enabled on port\n");
1803 return -EINVAL;
1804 }
1805
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001806 if (pause->autoneg) {
1807 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1808 return -EINVAL;
1809 }
1810
1811 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1812 if (err) {
1813 netdev_err(dev, "Failed to configure port's headroom\n");
1814 return err;
1815 }
1816
1817 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1818 if (err) {
1819 netdev_err(dev, "Failed to set PAUSE parameters\n");
1820 goto err_port_pause_configure;
1821 }
1822
1823 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1824 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1825
1826 return 0;
1827
1828err_port_pause_configure:
1829 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1830 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1831 return err;
1832}
1833
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001834struct mlxsw_sp_port_hw_stats {
1835 char str[ETH_GSTRING_LEN];
Jiri Pirko412791d2016-10-21 16:07:19 +02001836 u64 (*getter)(const char *payload);
Ido Schimmel18281f22017-03-24 08:02:51 +01001837 bool cells_bytes;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001838};
1839
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001840static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001841 {
1842 .str = "a_frames_transmitted_ok",
1843 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1844 },
1845 {
1846 .str = "a_frames_received_ok",
1847 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1848 },
1849 {
1850 .str = "a_frame_check_sequence_errors",
1851 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1852 },
1853 {
1854 .str = "a_alignment_errors",
1855 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1856 },
1857 {
1858 .str = "a_octets_transmitted_ok",
1859 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1860 },
1861 {
1862 .str = "a_octets_received_ok",
1863 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1864 },
1865 {
1866 .str = "a_multicast_frames_xmitted_ok",
1867 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1868 },
1869 {
1870 .str = "a_broadcast_frames_xmitted_ok",
1871 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1872 },
1873 {
1874 .str = "a_multicast_frames_received_ok",
1875 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1876 },
1877 {
1878 .str = "a_broadcast_frames_received_ok",
1879 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1880 },
1881 {
1882 .str = "a_in_range_length_errors",
1883 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1884 },
1885 {
1886 .str = "a_out_of_range_length_field",
1887 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1888 },
1889 {
1890 .str = "a_frame_too_long_errors",
1891 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1892 },
1893 {
1894 .str = "a_symbol_error_during_carrier",
1895 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1896 },
1897 {
1898 .str = "a_mac_control_frames_transmitted",
1899 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1900 },
1901 {
1902 .str = "a_mac_control_frames_received",
1903 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1904 },
1905 {
1906 .str = "a_unsupported_opcodes_received",
1907 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1908 },
1909 {
1910 .str = "a_pause_mac_ctrl_frames_received",
1911 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1912 },
1913 {
1914 .str = "a_pause_mac_ctrl_frames_xmitted",
1915 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1916 },
1917};
1918
1919#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1920
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001921static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1922 {
1923 .str = "rx_octets_prio",
1924 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1925 },
1926 {
1927 .str = "rx_frames_prio",
1928 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1929 },
1930 {
1931 .str = "tx_octets_prio",
1932 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1933 },
1934 {
1935 .str = "tx_frames_prio",
1936 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1937 },
1938 {
1939 .str = "rx_pause_prio",
1940 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1941 },
1942 {
1943 .str = "rx_pause_duration_prio",
1944 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1945 },
1946 {
1947 .str = "tx_pause_prio",
1948 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1949 },
1950 {
1951 .str = "tx_pause_duration_prio",
1952 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1953 },
1954};
1955
1956#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1957
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001958static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1959 {
1960 .str = "tc_transmit_queue_tc",
Ido Schimmel18281f22017-03-24 08:02:51 +01001961 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
1962 .cells_bytes = true,
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001963 },
1964 {
1965 .str = "tc_no_buffer_discard_uc_tc",
1966 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1967 },
1968};
1969
1970#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1971
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001972#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001973 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1974 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001975 IEEE_8021QAZ_MAX_TCS)
1976
1977static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1978{
1979 int i;
1980
1981 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1982 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1983 mlxsw_sp_port_hw_prio_stats[i].str, prio);
1984 *p += ETH_GSTRING_LEN;
1985 }
1986}
1987
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001988static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1989{
1990 int i;
1991
1992 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1993 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1994 mlxsw_sp_port_hw_tc_stats[i].str, tc);
1995 *p += ETH_GSTRING_LEN;
1996 }
1997}
1998
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001999static void mlxsw_sp_port_get_strings(struct net_device *dev,
2000 u32 stringset, u8 *data)
2001{
2002 u8 *p = data;
2003 int i;
2004
2005 switch (stringset) {
2006 case ETH_SS_STATS:
2007 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2008 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2009 ETH_GSTRING_LEN);
2010 p += ETH_GSTRING_LEN;
2011 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002012
2013 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2014 mlxsw_sp_port_get_prio_strings(&p, i);
2015
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002016 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2017 mlxsw_sp_port_get_tc_strings(&p, i);
2018
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002019 break;
2020 }
2021}
2022
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002023static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2024 enum ethtool_phys_id_state state)
2025{
2026 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2027 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2028 char mlcr_pl[MLXSW_REG_MLCR_LEN];
2029 bool active;
2030
2031 switch (state) {
2032 case ETHTOOL_ID_ACTIVE:
2033 active = true;
2034 break;
2035 case ETHTOOL_ID_INACTIVE:
2036 active = false;
2037 break;
2038 default:
2039 return -EOPNOTSUPP;
2040 }
2041
2042 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2043 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2044}
2045
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002046static int
2047mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2048 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2049{
2050 switch (grp) {
2051 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2052 *p_hw_stats = mlxsw_sp_port_hw_stats;
2053 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2054 break;
2055 case MLXSW_REG_PPCNT_PRIO_CNT:
2056 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2057 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2058 break;
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002059 case MLXSW_REG_PPCNT_TC_CNT:
2060 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2061 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2062 break;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002063 default:
2064 WARN_ON(1);
Yotam Gigie915ac62017-01-09 11:25:48 +01002065 return -EOPNOTSUPP;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002066 }
2067 return 0;
2068}
2069
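/* Fetch one PPCNT counter group for the port and copy it into 'data',
 * converting cell-based counters (such as the per-TC transmit queue
 * counter) to bytes.
 */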
2070static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2071 enum mlxsw_reg_ppcnt_grp grp, int prio,
2072 u64 *data, int data_index)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002073{
Ido Schimmel18281f22017-03-24 08:02:51 +01002074 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2075 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002076 struct mlxsw_sp_port_hw_stats *hw_stats;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002077 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002078 int i, len;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002079 int err;
2080
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002081 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2082 if (err)
2083 return;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002084 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
Ido Schimmel18281f22017-03-24 08:02:51 +01002085 for (i = 0; i < len; i++) {
Colin Ian Kingfaac0ff2016-09-23 12:02:45 +01002086 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
Ido Schimmel18281f22017-03-24 08:02:51 +01002087 if (!hw_stats[i].cells_bytes)
2088 continue;
2089 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2090 data[data_index + i]);
2091 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002092}
2093
2094static void mlxsw_sp_port_get_stats(struct net_device *dev,
2095 struct ethtool_stats *stats, u64 *data)
2096{
2097 int i, data_index = 0;
2098
2099 /* IEEE 802.3 Counters */
2100 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2101 data, data_index);
2102 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2103
2104 /* Per-Priority Counters */
2105 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2106 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2107 data, data_index);
2108 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2109 }
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002110
2111 /* Per-TC Counters */
2112 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2113 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2114 data, data_index);
2115 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2116 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002117}
2118
2119static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2120{
2121 switch (sset) {
2122 case ETH_SS_STATS:
Ido Schimmel7ed674b2016-07-19 15:35:53 +02002123 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002124 default:
2125 return -EOPNOTSUPP;
2126 }
2127}
2128
2129struct mlxsw_sp_port_link_mode {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002130 enum ethtool_link_mode_bit_indices mask_ethtool;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002131 u32 mask;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002132 u32 speed;
2133};
2134
2135static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2136 {
2137 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002138 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2139 .speed = SPEED_100,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002140 },
2141 {
2142 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2143 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002144 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2145 .speed = SPEED_1000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002146 },
2147 {
2148 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002149 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2150 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002151 },
2152 {
2153 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2154 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002155 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2156 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002157 },
2158 {
2159 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2160 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2161 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2162 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002163 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2164 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002165 },
2166 {
2167 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002168 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2169 .speed = SPEED_20000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002170 },
2171 {
2172 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002173 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2174 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002175 },
2176 {
2177 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002178 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2179 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002180 },
2181 {
2182 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002183 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2184 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002185 },
2186 {
2187 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002188 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2189 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002190 },
2191 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002192 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2193 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2194 .speed = SPEED_25000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002195 },
2196 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002197 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2198 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2199 .speed = SPEED_25000,
2200 },
2201 {
2202 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2203 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2204 .speed = SPEED_25000,
2205 },
2211 {
2212 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2213 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2214 .speed = SPEED_50000,
2215 },
2216 {
2217 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2218 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2219 .speed = SPEED_50000,
2220 },
2221 {
2222 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2223 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2224 .speed = SPEED_50000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002225 },
2226 {
2227 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002228 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2229 .speed = SPEED_56000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002230 },
2231 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002232 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2233 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2234 .speed = SPEED_56000,
2235 },
2236 {
2237 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2238 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2239 .speed = SPEED_56000,
2240 },
2241 {
2242 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2243 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2244 .speed = SPEED_56000,
2245 },
2246 {
2247 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2248 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2249 .speed = SPEED_100000,
2250 },
2251 {
2252 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2253 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2254 .speed = SPEED_100000,
2255 },
2256 {
2257 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2258 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2259 .speed = SPEED_100000,
2260 },
2261 {
2262 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2263 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2264 .speed = SPEED_100000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002265 },
2266};
2267
2268#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2269
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002270static void
2271mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2272 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002273{
2274 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2275 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2276 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2277 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2278 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2279 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002280 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002281
2282 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2283 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2284 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2285 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2286 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002287 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002288}
2289
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002290static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002291{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002292 int i;
2293
2294 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2295 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002296 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2297 mode);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002298 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002299}
2300
2301static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002302 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002303{
2304 u32 speed = SPEED_UNKNOWN;
2305 u8 duplex = DUPLEX_UNKNOWN;
2306 int i;
2307
2308 if (!carrier_ok)
2309 goto out;
2310
2311 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2312 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2313 speed = mlxsw_sp_port_link_mode[i].speed;
2314 duplex = DUPLEX_FULL;
2315 break;
2316 }
2317 }
2318out:
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002319 cmd->base.speed = speed;
2320 cmd->base.duplex = duplex;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002321}
2322
2323static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2324{
2325 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2326 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2327 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2328 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2329 return PORT_FIBRE;
2330
2331 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2332 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2333 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2334 return PORT_DA;
2335
2336 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2337 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2338 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2339 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2340 return PORT_NONE;
2341
2342 return PORT_OTHER;
2343}
2344
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002345static u32
2346mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002347{
2348 u32 ptys_proto = 0;
2349 int i;
2350
2351 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002352 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2353 cmd->link_modes.advertising))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002354 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2355 }
2356 return ptys_proto;
2357}
2358
2359static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2360{
2361 u32 ptys_proto = 0;
2362 int i;
2363
2364 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2365 if (speed == mlxsw_sp_port_link_mode[i].speed)
2366 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2367 }
2368 return ptys_proto;
2369}
2370
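/* Build a PTYS mask of all link modes whose speed does not exceed
 * 'upper_speed'. Used to derive the admin speeds from the port's width.
 */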
Ido Schimmel18f1e702016-02-26 17:32:31 +01002371static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2372{
2373 u32 ptys_proto = 0;
2374 int i;
2375
2376 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2377 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2378 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2379 }
2380 return ptys_proto;
2381}
2382
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002383static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2384 struct ethtool_link_ksettings *cmd)
2385{
2386 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2387 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2388 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2389
2390 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2391 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2392}
2393
2394static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2395 struct ethtool_link_ksettings *cmd)
2396{
2397 if (!autoneg)
2398 return;
2399
2400 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2401 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2402}
2403
2404static void
2405mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2406 struct ethtool_link_ksettings *cmd)
2407{
2408 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2409 return;
2410
2411 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2412 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2413}
2414
2415static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2416 struct ethtool_link_ksettings *cmd)
2417{
2418 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2419 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2420 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2421 char ptys_pl[MLXSW_REG_PTYS_LEN];
2422 u8 autoneg_status;
2423 bool autoneg;
2424 int err;
2425
2426 autoneg = mlxsw_sp_port->link.autoneg;
Elad Raz401c8b42016-10-28 21:35:52 +02002427 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002428 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2429 if (err)
2430 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002431 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2432 &eth_proto_oper);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002433
2434 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2435
2436 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2437
2438 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2439 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2440 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2441
2442 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2443 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2444 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2445 cmd);
2446
2447 return 0;
2448}
2449
2450static int
2451mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2452 const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002453{
2454 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2455 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2456 char ptys_pl[MLXSW_REG_PTYS_LEN];
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002457 u32 eth_proto_cap, eth_proto_new;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002458 bool autoneg;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002459 int err;
2460
Elad Raz401c8b42016-10-28 21:35:52 +02002461 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002462 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002463 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002464 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002465 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002466
2467 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2468 eth_proto_new = autoneg ?
2469 mlxsw_sp_to_ptys_advert_link(cmd) :
2470 mlxsw_sp_to_ptys_speed(cmd->base.speed);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002471
2472 eth_proto_new = eth_proto_new & eth_proto_cap;
2473 if (!eth_proto_new) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002474 netdev_err(dev, "No supported speed requested\n");
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002475 return -EINVAL;
2476 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002477
Elad Raz401c8b42016-10-28 21:35:52 +02002478 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2479 eth_proto_new);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002480 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002481 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002482 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002483
Ido Schimmel6277d462016-07-15 11:14:58 +02002484 if (!netif_running(dev))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002485 return 0;
2486
Ido Schimmel0c83f882016-09-12 13:26:23 +02002487 mlxsw_sp_port->link.autoneg = autoneg;
2488
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002489 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2490 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002491
2492 return 0;
2493}
2494
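/* Flash device firmware via ethtool. RTNL is released while the (potentially
 * long) firmware request and flash take place; dev_hold() keeps the netdev
 * alive in the meantime.
 */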
Yotam Gigice6ef68f2017-06-01 16:26:46 +03002495static int mlxsw_sp_flash_device(struct net_device *dev,
2496 struct ethtool_flash *flash)
2497{
2498 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2499 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2500 const struct firmware *firmware;
2501 int err;
2502
2503 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2504 return -EOPNOTSUPP;
2505
2506 dev_hold(dev);
2507 rtnl_unlock();
2508
2509 err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2510 if (err)
2511 goto out;
2512 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
2513 release_firmware(firmware);
2514out:
2515 rtnl_lock();
2516 dev_put(dev);
2517 return err;
2518}
2519
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002520static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2521 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2522 .get_link = ethtool_op_get_link,
Ido Schimmel9f7ec052016-04-06 17:10:14 +02002523 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2524 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002525 .get_strings = mlxsw_sp_port_get_strings,
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002526 .set_phys_id = mlxsw_sp_port_set_phys_id,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002527 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2528 .get_sset_count = mlxsw_sp_port_get_sset_count,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002529 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2530 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
Yotam Gigice6ef68f2017-06-01 16:26:46 +03002531 .flash_device = mlxsw_sp_flash_device,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002532};
2533
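/* The maximum speed a port supports scales with its number of lanes (width),
 * so advertise all link modes up to MLXSW_SP_PORT_BASE_SPEED * width.
 */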
Ido Schimmel18f1e702016-02-26 17:32:31 +01002534static int
2535mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2536{
2537 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2538 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2539 char ptys_pl[MLXSW_REG_PTYS_LEN];
2540 u32 eth_proto_admin;
2541
2542 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
Elad Raz401c8b42016-10-28 21:35:52 +02002543 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2544 eth_proto_admin);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002545 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2546}
2547
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002548int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2549 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2550 bool dwrr, u8 dwrr_weight)
Ido Schimmel90183b92016-04-06 17:10:08 +02002551{
2552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2553 char qeec_pl[MLXSW_REG_QEEC_LEN];
2554
2555 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2556 next_index);
2557 mlxsw_reg_qeec_de_set(qeec_pl, true);
2558 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2559 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2560 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2561}
2562
Ido Schimmelcc7cf512016-04-06 17:10:11 +02002563int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2564 enum mlxsw_reg_qeec_hr hr, u8 index,
2565 u8 next_index, u32 maxrate)
Ido Schimmel90183b92016-04-06 17:10:08 +02002566{
2567 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2568 char qeec_pl[MLXSW_REG_QEEC_LEN];
2569
2570 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2571 next_index);
2572 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2573 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2574 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2575}
2576
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002577int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2578 u8 switch_prio, u8 tclass)
Ido Schimmel90183b92016-04-06 17:10:08 +02002579{
2580 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2581 char qtct_pl[MLXSW_REG_QTCT_LEN];
2582
2583 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2584 tclass);
2585 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2586}
2587
2588static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2589{
2590 int err, i;
2591
 2592	/* Set up the elements hierarchy, so that each TC is linked to
 2593	 * one subgroup, and all subgroups are members of the same group.
2594 */
2595 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2596 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2597 0);
2598 if (err)
2599 return err;
2600 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2601 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2602 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2603 0, false, 0);
2604 if (err)
2605 return err;
2606 }
2607 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2608 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2609 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2610 false, 0);
2611 if (err)
2612 return err;
2613 }
2614
 2615	/* Make sure the max shaper is disabled in all hierarchies that
2616 * support it.
2617 */
2618 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2619 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2620 MLXSW_REG_QEEC_MAS_DIS);
2621 if (err)
2622 return err;
2623 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2624 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2625 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2626 i, 0,
2627 MLXSW_REG_QEEC_MAS_DIS);
2628 if (err)
2629 return err;
2630 }
2631 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2632 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2633 MLXSW_REG_QEEC_HIERARCY_TC,
2634 i, i,
2635 MLXSW_REG_QEEC_MAS_DIS);
2636 if (err)
2637 return err;
2638 }
2639
2640 /* Map all priorities to traffic class 0. */
2641 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2642 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2643 if (err)
2644 return err;
2645 }
2646
2647 return 0;
2648}
2649
Ido Schimmel5b153852017-06-08 08:47:44 +02002650static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2651 bool split, u8 module, u8 width, u8 lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002652{
Ido Schimmelc57529e2017-05-26 08:37:31 +02002653 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002654 struct mlxsw_sp_port *mlxsw_sp_port;
2655 struct net_device *dev;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002656 int err;
2657
Ido Schimmel5b153852017-06-08 08:47:44 +02002658 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2659 if (err) {
2660 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2661 local_port);
2662 return err;
2663 }
2664
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002665 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
Ido Schimmel5b153852017-06-08 08:47:44 +02002666 if (!dev) {
2667 err = -ENOMEM;
2668 goto err_alloc_etherdev;
2669 }
Jiri Pirkof20a91f2016-10-27 15:13:00 +02002670 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002671 mlxsw_sp_port = netdev_priv(dev);
2672 mlxsw_sp_port->dev = dev;
2673 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2674 mlxsw_sp_port->local_port = local_port;
Ido Schimmelc57529e2017-05-26 08:37:31 +02002675 mlxsw_sp_port->pvid = 1;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002676 mlxsw_sp_port->split = split;
Ido Schimmeld664b412016-06-09 09:51:40 +02002677 mlxsw_sp_port->mapping.module = module;
2678 mlxsw_sp_port->mapping.width = width;
2679 mlxsw_sp_port->mapping.lane = lane;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002680 mlxsw_sp_port->link.autoneg = 1;
Ido Schimmel31a08a52017-05-26 08:37:26 +02002681 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02002682 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002683
2684 mlxsw_sp_port->pcpu_stats =
2685 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2686 if (!mlxsw_sp_port->pcpu_stats) {
2687 err = -ENOMEM;
2688 goto err_alloc_stats;
2689 }
2690
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002691 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2692 GFP_KERNEL);
2693 if (!mlxsw_sp_port->sample) {
2694 err = -ENOMEM;
2695 goto err_alloc_sample;
2696 }
2697
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002698 mlxsw_sp_port->hw_stats.cache =
2699 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2700
2701 if (!mlxsw_sp_port->hw_stats.cache) {
2702 err = -ENOMEM;
2703 goto err_alloc_hw_stats;
2704 }
2705 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2706 &update_stats_cache);
2707
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002708 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2709 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2710
Ido Schimmel5b153852017-06-08 08:47:44 +02002711 err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
2712 lane);
2713 if (err) {
2714 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2715 mlxsw_sp_port->local_port);
2716 goto err_port_module_map;
2717 }
2718
Ido Schimmel3247ff22016-09-08 08:16:02 +02002719 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2720 if (err) {
2721 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2722 mlxsw_sp_port->local_port);
2723 goto err_port_swid_set;
2724 }
2725
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002726 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2727 if (err) {
2728 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2729 mlxsw_sp_port->local_port);
2730 goto err_dev_addr_init;
2731 }
2732
2733 netif_carrier_off(dev);
2734
2735 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
Yotam Gigi763b4b72016-07-21 12:03:17 +02002736 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2737 dev->hw_features |= NETIF_F_HW_TC;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002738
Jarod Wilsond894be52016-10-20 13:55:16 -04002739 dev->min_mtu = 0;
2740 dev->max_mtu = ETH_MAX_MTU;
2741
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002742	/* Each packet needs to have a Tx header (metadata) on top of all other
2743 * headers.
2744 */
Yotam Gigifeb7d382016-10-04 09:46:04 +02002745 dev->needed_headroom = MLXSW_TXHDR_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002746
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002747 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2748 if (err) {
2749 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2750 mlxsw_sp_port->local_port);
2751 goto err_port_system_port_mapping_set;
2752 }
2753
Ido Schimmel18f1e702016-02-26 17:32:31 +01002754 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2755 if (err) {
2756 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2757 mlxsw_sp_port->local_port);
2758 goto err_port_speed_by_width_set;
2759 }
2760
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002761 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2762 if (err) {
2763 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2764 mlxsw_sp_port->local_port);
2765 goto err_port_mtu_set;
2766 }
2767
2768 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2769 if (err)
2770 goto err_port_admin_status_set;
2771
2772 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2773 if (err) {
2774 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2775 mlxsw_sp_port->local_port);
2776 goto err_port_buffers_init;
2777 }
2778
Ido Schimmel90183b92016-04-06 17:10:08 +02002779 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2780 if (err) {
2781 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2782 mlxsw_sp_port->local_port);
2783 goto err_port_ets_init;
2784 }
2785
Ido Schimmelf00817d2016-04-06 17:10:09 +02002786 /* ETS and buffers must be initialized before DCB. */
2787 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2788 if (err) {
2789 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2790 mlxsw_sp_port->local_port);
2791 goto err_port_dcb_init;
2792 }
2793
Ido Schimmela1107482017-05-26 08:37:39 +02002794 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
Ido Schimmel45a4a162017-05-16 19:38:35 +02002795 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02002796 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
Ido Schimmel45a4a162017-05-16 19:38:35 +02002797 mlxsw_sp_port->local_port);
Ido Schimmela1107482017-05-26 08:37:39 +02002798 goto err_port_fids_init;
Ido Schimmel45a4a162017-05-16 19:38:35 +02002799 }
2800
Ido Schimmelc57529e2017-05-26 08:37:31 +02002801 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2802 if (IS_ERR(mlxsw_sp_port_vlan)) {
2803 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
Ido Schimmel05978482016-08-17 16:39:30 +02002804 mlxsw_sp_port->local_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02002805 goto err_port_vlan_get;
Ido Schimmel05978482016-08-17 16:39:30 +02002806 }
2807
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002808 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
Ido Schimmel2f258442016-08-17 16:39:31 +02002809 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002810 err = register_netdev(dev);
2811 if (err) {
2812 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2813 mlxsw_sp_port->local_port);
2814 goto err_register_netdev;
2815 }
2816
Elad Razd808c7e2016-10-28 21:35:57 +02002817 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2818 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2819 module);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002820 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002821 return 0;
2822
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002823err_register_netdev:
Ido Schimmel2f258442016-08-17 16:39:31 +02002824 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002825 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02002826 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2827err_port_vlan_get:
Ido Schimmela1107482017-05-26 08:37:39 +02002828 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
2829err_port_fids_init:
Ido Schimmel4de34eb2016-08-04 17:36:22 +03002830 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002831err_port_dcb_init:
Ido Schimmel90183b92016-04-06 17:10:08 +02002832err_port_ets_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002833err_port_buffers_init:
2834err_port_admin_status_set:
2835err_port_mtu_set:
Ido Schimmel18f1e702016-02-26 17:32:31 +01002836err_port_speed_by_width_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002837err_port_system_port_mapping_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002838err_dev_addr_init:
Ido Schimmel3247ff22016-09-08 08:16:02 +02002839 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2840err_port_swid_set:
Ido Schimmel5b153852017-06-08 08:47:44 +02002841 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
2842err_port_module_map:
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002843 kfree(mlxsw_sp_port->hw_stats.cache);
2844err_alloc_hw_stats:
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002845 kfree(mlxsw_sp_port->sample);
2846err_alloc_sample:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002847 free_percpu(mlxsw_sp_port->pcpu_stats);
2848err_alloc_stats:
2849 free_netdev(dev);
Ido Schimmel5b153852017-06-08 08:47:44 +02002850err_alloc_etherdev:
Jiri Pirko67963a32016-10-28 21:35:55 +02002851 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2852 return err;
2853}
2854
Ido Schimmel5b153852017-06-08 08:47:44 +02002855static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002856{
2857 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2858
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002859 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
Jiri Pirko67963a32016-10-28 21:35:55 +02002860 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002861 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
Ido Schimmel2f258442016-08-17 16:39:31 +02002862 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002863 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmelc57529e2017-05-26 08:37:31 +02002864 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
Ido Schimmela1107482017-05-26 08:37:39 +02002865 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002866 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmel3e9b27b2016-02-26 17:32:28 +01002867 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2868 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002869 kfree(mlxsw_sp_port->hw_stats.cache);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002870 kfree(mlxsw_sp_port->sample);
Yotam Gigi136f1442017-01-09 11:25:47 +01002871 free_percpu(mlxsw_sp_port->pcpu_stats);
Ido Schimmel31a08a52017-05-26 08:37:26 +02002872 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002873 free_netdev(mlxsw_sp_port->dev);
Jiri Pirko67963a32016-10-28 21:35:55 +02002874 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2875}
2876
Jiri Pirkof83e2102016-10-28 21:35:49 +02002877static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2878{
2879 return mlxsw_sp->ports[local_port] != NULL;
2880}
2881
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002882static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2883{
2884 int i;
2885
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002886 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002887 if (mlxsw_sp_port_created(mlxsw_sp, i))
2888 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002889 kfree(mlxsw_sp->port_to_module);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002890 kfree(mlxsw_sp->ports);
2891}
2892
2893static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2894{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002895 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
Ido Schimmeld664b412016-06-09 09:51:40 +02002896 u8 module, width, lane;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002897 size_t alloc_size;
2898 int i;
2899 int err;
2900
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002901 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002902 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2903 if (!mlxsw_sp->ports)
2904 return -ENOMEM;
2905
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002906 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
2907 if (!mlxsw_sp->port_to_module) {
2908 err = -ENOMEM;
2909 goto err_port_to_module_alloc;
2910 }
2911
2912 for (i = 1; i < max_ports; i++) {
Ido Schimmel558c2d52016-02-26 17:32:29 +01002913 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
Ido Schimmeld664b412016-06-09 09:51:40 +02002914 &width, &lane);
Ido Schimmel558c2d52016-02-26 17:32:29 +01002915 if (err)
2916 goto err_port_module_info_get;
2917 if (!width)
2918 continue;
2919 mlxsw_sp->port_to_module[i] = module;
Jiri Pirko67963a32016-10-28 21:35:55 +02002920 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
2921 module, width, lane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002922 if (err)
2923 goto err_port_create;
2924 }
2925 return 0;
2926
2927err_port_create:
Ido Schimmel558c2d52016-02-26 17:32:29 +01002928err_port_module_info_get:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002929 for (i--; i >= 1; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002930 if (mlxsw_sp_port_created(mlxsw_sp, i))
2931 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002932 kfree(mlxsw_sp->port_to_module);
2933err_port_to_module_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002934 kfree(mlxsw_sp->ports);
2935 return err;
2936}
2937
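/* Return the first local port of the split cluster that local_port
 * belongs to. Assuming MLXSW_SP_PORTS_PER_CLUSTER_MAX is 4, local ports
 * 1-4 map to base port 1, ports 5-8 to base port 5, and so on.
 */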
Ido Schimmel18f1e702016-02-26 17:32:31 +01002938static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2939{
2940 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2941
2942 return local_port - offset;
2943}
2944
Ido Schimmelbe945352016-06-09 09:51:39 +02002945static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2946 u8 module, unsigned int count)
2947{
2948 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
2949 int err, i;
2950
2951 for (i = 0; i < count; i++) {
Ido Schimmelbe945352016-06-09 09:51:39 +02002952 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
Ido Schimmeld664b412016-06-09 09:51:40 +02002953 module, width, i * width);
Ido Schimmelbe945352016-06-09 09:51:39 +02002954 if (err)
2955 goto err_port_create;
2956 }
2957
2958 return 0;
2959
2960err_port_create:
2961 for (i--; i >= 0; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002962 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2963 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmelbe945352016-06-09 09:51:39 +02002964 return err;
2965}
2966
2967static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2968 u8 base_port, unsigned int count)
2969{
2970 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
2971 int i;
2972
2973	/* Unsplitting a port that was split by four requires re-creating
2974	 * two full-width ports, whereas a split by two requires only one.
2975	 */
2976 count = count / 2;
2977
2978 for (i = 0; i < count; i++) {
2979 local_port = base_port + i * 2;
2980 module = mlxsw_sp->port_to_module[local_port];
2981
Ido Schimmelbe945352016-06-09 09:51:39 +02002982 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
Ido Schimmeld664b412016-06-09 09:51:40 +02002983 width, 0);
Ido Schimmelbe945352016-06-09 09:51:39 +02002984 }
2985}
2986
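/* Split a front-panel port into two or four ports. The port must
 * currently use the full module width and the neighbor local ports in
 * its cluster must be free; the affected ports are removed and
 * re-created with the width divided by the requested count, and the
 * original layout is restored if creation fails.
 *
 * Illustrative user-space trigger (device and port names depend on the
 * system):
 *
 *	devlink port split pci/0000:03:00.0/1 count 4
 */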
Jiri Pirkob2f10572016-04-08 19:11:23 +02002987static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2988 unsigned int count)
Ido Schimmel18f1e702016-02-26 17:32:31 +01002989{
Jiri Pirkob2f10572016-04-08 19:11:23 +02002990 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002991 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002992 u8 module, cur_width, base_port;
2993 int i;
2994 int err;
2995
2996 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2997 if (!mlxsw_sp_port) {
2998 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2999 local_port);
3000 return -EINVAL;
3001 }
3002
Ido Schimmeld664b412016-06-09 09:51:40 +02003003 module = mlxsw_sp_port->mapping.module;
3004 cur_width = mlxsw_sp_port->mapping.width;
3005
Ido Schimmel18f1e702016-02-26 17:32:31 +01003006 if (count != 2 && count != 4) {
3007 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3008 return -EINVAL;
3009 }
3010
Ido Schimmel18f1e702016-02-26 17:32:31 +01003011 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3012 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3013 return -EINVAL;
3014 }
3015
3016	/* Make sure the neighbor (even) ports needed for the split are free. */
3017 if (count == 2) {
3018 base_port = local_port;
3019 if (mlxsw_sp->ports[base_port + 1]) {
3020 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3021 return -EINVAL;
3022 }
3023 } else {
3024 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3025 if (mlxsw_sp->ports[base_port + 1] ||
3026 mlxsw_sp->ports[base_port + 3]) {
3027 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3028 return -EINVAL;
3029 }
3030 }
3031
3032 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003033 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3034 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003035
Ido Schimmelbe945352016-06-09 09:51:39 +02003036 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
3037 if (err) {
3038 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3039 goto err_port_split_create;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003040 }
3041
3042 return 0;
3043
Ido Schimmelbe945352016-06-09 09:51:39 +02003044err_port_split_create:
3045 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003046 return err;
3047}
3048
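/* Undo a previous split: the split ports of the cluster are removed and
 * the original full-width port(s) are re-created.
 */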
Jiri Pirkob2f10572016-04-08 19:11:23 +02003049static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
Ido Schimmel18f1e702016-02-26 17:32:31 +01003050{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003051 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003052 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmeld664b412016-06-09 09:51:40 +02003053 u8 cur_width, base_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003054 unsigned int count;
3055 int i;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003056
3057 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3058 if (!mlxsw_sp_port) {
3059 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3060 local_port);
3061 return -EINVAL;
3062 }
3063
3064 if (!mlxsw_sp_port->split) {
3065 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
3066 return -EINVAL;
3067 }
3068
Ido Schimmeld664b412016-06-09 09:51:40 +02003069 cur_width = mlxsw_sp_port->mapping.width;
Ido Schimmel18f1e702016-02-26 17:32:31 +01003070 count = cur_width == 1 ? 4 : 2;
3071
3072 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3073
3074 /* Determine which ports to remove. */
3075 if (count == 2 && local_port >= base_port + 2)
3076 base_port = base_port + 2;
3077
3078 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02003079 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3080 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003081
Ido Schimmelbe945352016-06-09 09:51:39 +02003082 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01003083
3084 return 0;
3085}
3086
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003087static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3088 char *pude_pl, void *priv)
3089{
3090 struct mlxsw_sp *mlxsw_sp = priv;
3091 struct mlxsw_sp_port *mlxsw_sp_port;
3092 enum mlxsw_reg_pude_oper_status status;
3093 u8 local_port;
3094
3095 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3096 mlxsw_sp_port = mlxsw_sp->ports[local_port];
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003097 if (!mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003098 return;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003099
3100 status = mlxsw_reg_pude_oper_status_get(pude_pl);
3101 if (status == MLXSW_PORT_OPER_STATUS_UP) {
3102 netdev_info(mlxsw_sp_port->dev, "link up\n");
3103 netif_carrier_on(mlxsw_sp_port->dev);
3104 } else {
3105 netdev_info(mlxsw_sp_port->dev, "link down\n");
3106 netif_carrier_off(mlxsw_sp_port->dev);
3107 }
3108}
3109
Nogah Frankel14eeda92016-11-25 10:33:32 +01003110static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3111 u8 local_port, void *priv)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003112{
3113 struct mlxsw_sp *mlxsw_sp = priv;
3114 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3115 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3116
3117 if (unlikely(!mlxsw_sp_port)) {
3118 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3119 local_port);
3120 return;
3121 }
3122
3123 skb->dev = mlxsw_sp_port->dev;
3124
3125 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3126 u64_stats_update_begin(&pcpu_stats->syncp);
3127 pcpu_stats->rx_packets++;
3128 pcpu_stats->rx_bytes += skb->len;
3129 u64_stats_update_end(&pcpu_stats->syncp);
3130
3131 skb->protocol = eth_type_trans(skb, skb->dev);
3132 netif_receive_skb(skb);
3133}
3134
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02003135static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3136 void *priv)
3137{
3138 skb->offload_fwd_mark = 1;
Nogah Frankel14eeda92016-11-25 10:33:32 +01003139 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02003140}
3141
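/* Deliver packets trapped for sampling to the psample subsystem. The
 * reported length is truncated to the configured size when truncation is
 * enabled, and the psample group and rate come from the per-port sample
 * state configured elsewhere in the driver (e.g. by the matchall sample
 * offload). The skb is always consumed here.
 */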
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003142static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3143 void *priv)
3144{
3145 struct mlxsw_sp *mlxsw_sp = priv;
3146 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3147 struct psample_group *psample_group;
3148 u32 size;
3149
3150 if (unlikely(!mlxsw_sp_port)) {
3151 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3152 local_port);
3153 goto out;
3154 }
3155 if (unlikely(!mlxsw_sp_port->sample)) {
3156 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3157 local_port);
3158 goto out;
3159 }
3160
3161 size = mlxsw_sp_port->sample->truncate ?
3162 mlxsw_sp_port->sample->trunc_size : skb->len;
3163
3164 rcu_read_lock();
3165 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
3166 if (!psample_group)
3167 goto out_unlock;
3168 psample_sample_packet(psample_group, skb, size,
3169 mlxsw_sp_port->dev->ifindex, 0,
3170 mlxsw_sp_port->sample->rate);
3171out_unlock:
3172 rcu_read_unlock();
3173out:
3174 consume_skb(skb);
3175}
3176
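/* Trap/listener definitions. MLXSW_SP_RXL_MARK differs from
 * MLXSW_SP_RXL_NO_MARK only in that the received skb has
 * offload_fwd_mark set, telling the software bridge that the packet was
 * already forwarded by the hardware. Each entry binds a trap ID to an
 * action, a trap group and a flag indicating whether it carries control
 * packets.
 */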
Nogah Frankel117b0da2016-11-25 10:33:44 +01003177#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel0fb78a42016-11-25 10:33:39 +01003178 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01003179 _is_ctrl, SP_##_trap_group, DISCARD)
Ido Schimmel93393b32016-08-25 18:42:38 +02003180
Nogah Frankel117b0da2016-11-25 10:33:44 +01003181#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel14eeda92016-11-25 10:33:32 +01003182 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01003183 _is_ctrl, SP_##_trap_group, DISCARD)
3184
3185#define MLXSW_SP_EVENTL(_func, _trap_id) \
3186 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
Nogah Frankel14eeda92016-11-25 10:33:32 +01003187
Nogah Frankel45449132016-11-25 10:33:35 +01003188static const struct mlxsw_listener mlxsw_sp_listener[] = {
3189 /* Events */
Nogah Frankel117b0da2016-11-25 10:33:44 +01003190 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
Nogah Frankelee4a60d2016-11-25 10:33:29 +01003191 /* L2 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01003192 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
3193 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
3194 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
3195 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
3196 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
3197 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
3198 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
3199 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
3200 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
3201 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
3202 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
Jiri Pirko9d41acc2017-04-18 16:55:38 +02003203 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
Ido Schimmel93393b32016-08-25 18:42:38 +02003204 /* L3 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01003205 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3206 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3207 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3208 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
3209 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
3210 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
3211 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
3212 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01003213 /* PKT Sample trap */
3214 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
Jiri Pirko0db7b382017-06-06 14:12:05 +02003215 false, SP_IP2ME, DISCARD),
3216 /* ACL trap */
3217 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003218};
3219
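/* Rate-limit traffic trapped to the CPU by configuring a policer per
 * trap group: control protocols (STP, LACP, LLDP, OSPF) get a low packet
 * rate, IGMP a higher one, routing exceptions an intermediate one, and
 * IP2ME is policed in bytes rather than packets.
 */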
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003220static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3221{
3222 char qpcr_pl[MLXSW_REG_QPCR_LEN];
3223 enum mlxsw_reg_qpcr_ir_units ir_units;
3224 int max_cpu_policers;
3225 bool is_bytes;
3226 u8 burst_size;
3227 u32 rate;
3228 int i, err;
3229
3230 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3231 return -EIO;
3232
3233 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3234
3235 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3236 for (i = 0; i < max_cpu_policers; i++) {
3237 is_bytes = false;
3238 switch (i) {
3239 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3240 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3241 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3242 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3243 rate = 128;
3244 burst_size = 7;
3245 break;
3246 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3247 rate = 16 * 1024;
3248 burst_size = 10;
3249 break;
3250 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3251 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3252 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3253 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3254 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3255 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3256 rate = 1024;
3257 burst_size = 7;
3258 break;
3259 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3260 is_bytes = true;
3261 rate = 4 * 1024;
3262 burst_size = 4;
3263 break;
3264 default:
3265 continue;
3266 }
3267
3268 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3269 burst_size);
3270 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3271 if (err)
3272 return err;
3273 }
3274
3275 return 0;
3276}
3277
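/* Bind each trap group to its policer, priority and traffic class via
 * the HTGT register. Control protocols get the highest priority (5),
 * while events use the default priority and TC and are not policed.
 */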
Nogah Frankel579c82e2016-11-25 10:33:42 +01003278static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003279{
3280 char htgt_pl[MLXSW_REG_HTGT_LEN];
Nogah Frankel117b0da2016-11-25 10:33:44 +01003281 enum mlxsw_reg_htgt_trap_group i;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003282 int max_cpu_policers;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003283 int max_trap_groups;
3284 u8 priority, tc;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003285 u16 policer_id;
Nogah Frankel117b0da2016-11-25 10:33:44 +01003286 int err;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003287
3288 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3289 return -EIO;
3290
3291 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003292 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003293
3294 for (i = 0; i < max_trap_groups; i++) {
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003295 policer_id = i;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003296 switch (i) {
Nogah Frankel117b0da2016-11-25 10:33:44 +01003297 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3298 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3299 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3300 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3301 priority = 5;
3302 tc = 5;
3303 break;
3304 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3305 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3306 priority = 4;
3307 tc = 4;
3308 break;
3309 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3310 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3311 priority = 3;
3312 tc = 3;
3313 break;
3314 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3315 priority = 2;
3316 tc = 2;
3317 break;
3318 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3319 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3320 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3321 priority = 1;
3322 tc = 1;
3323 break;
3324 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
Nogah Frankel579c82e2016-11-25 10:33:42 +01003325 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3326 tc = MLXSW_REG_HTGT_DEFAULT_TC;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003327 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003328 break;
3329 default:
3330 continue;
3331 }
Nogah Frankel117b0da2016-11-25 10:33:44 +01003332
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003333 if (max_cpu_policers <= policer_id &&
3334 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3335 return -EIO;
3336
3337 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003338 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3339 if (err)
3340 return err;
3341 }
3342
3343 return 0;
3344}
3345
3346static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3347{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003348 int i;
3349 int err;
3350
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003351 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3352 if (err)
3353 return err;
3354
Nogah Frankel579c82e2016-11-25 10:33:42 +01003355 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003356 if (err)
3357 return err;
3358
Nogah Frankel45449132016-11-25 10:33:35 +01003359 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003360 err = mlxsw_core_trap_register(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003361 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003362 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003363 if (err)
Nogah Frankel45449132016-11-25 10:33:35 +01003364 goto err_listener_register;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003365
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003366 }
3367 return 0;
3368
Nogah Frankel45449132016-11-25 10:33:35 +01003369err_listener_register:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003370 for (i--; i >= 0; i--) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003371 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003372 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003373 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003374 }
3375 return err;
3376}
3377
3378static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3379{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003380 int i;
3381
Nogah Frankel45449132016-11-25 10:33:35 +01003382 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003383 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003384 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003385 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003386 }
3387}
3388
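/* Configure the LAG hash (SLCR) over L2-L4 packet fields and allocate
 * the LAG tracking array according to the MAX_LAG resource of the
 * device.
 */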
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003389static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3390{
3391 char slcr_pl[MLXSW_REG_SLCR_LEN];
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003392 int err;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003393
3394 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3395 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3396 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3397 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3398 MLXSW_REG_SLCR_LAG_HASH_SIP |
3399 MLXSW_REG_SLCR_LAG_HASH_DIP |
3400 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3401 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3402 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003403 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3404 if (err)
3405 return err;
3406
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003407 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3408 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003409 return -EIO;
3410
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003411 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003412 sizeof(struct mlxsw_sp_upper),
3413 GFP_KERNEL);
3414 if (!mlxsw_sp->lags)
3415 return -ENOMEM;
3416
3417 return 0;
3418}
3419
3420static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3421{
3422 kfree(mlxsw_sp->lags);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003423}
3424
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003425static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3426{
3427 char htgt_pl[MLXSW_REG_HTGT_LEN];
3428
Nogah Frankel579c82e2016-11-25 10:33:42 +01003429 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3430 MLXSW_REG_HTGT_INVALID_POLICER,
3431 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3432 MLXSW_REG_HTGT_DEFAULT_TC);
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003433 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3434}
3435
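/* Main init flow for a Spectrum instance. Ordering matters: the firmware
 * revision is validated (and upgraded if needed) first, then the base
 * MAC is read and FIDs, traps, shared buffers, LAG, switchdev, router,
 * SPAN, ACL, counter pool and dpipe debug are brought up; the front
 * panel ports are created last. The error path and mlxsw_sp_fini() tear
 * everything down in reverse order.
 */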
Jiri Pirkob2f10572016-04-08 19:11:23 +02003436static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003437 const struct mlxsw_bus_info *mlxsw_bus_info)
3438{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003439 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003440 int err;
3441
3442 mlxsw_sp->core = mlxsw_core;
3443 mlxsw_sp->bus_info = mlxsw_bus_info;
3444
Yotam Gigi6b742192017-05-23 21:56:29 +02003445 err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
3446 if (err) {
3447 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
3448 return err;
3449 }
3450
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003451 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3452 if (err) {
3453 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3454 return err;
3455 }
3456
Ido Schimmela1107482017-05-26 08:37:39 +02003457 err = mlxsw_sp_fids_init(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003458 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02003459 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
Nogah Frankel45449132016-11-25 10:33:35 +01003460 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003461 }
3462
Ido Schimmela1107482017-05-26 08:37:39 +02003463 err = mlxsw_sp_traps_init(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003464 if (err) {
Ido Schimmela1107482017-05-26 08:37:39 +02003465 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3466 goto err_traps_init;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003467 }
3468
3469 err = mlxsw_sp_buffers_init(mlxsw_sp);
3470 if (err) {
3471 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3472 goto err_buffers_init;
3473 }
3474
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003475 err = mlxsw_sp_lag_init(mlxsw_sp);
3476 if (err) {
3477 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3478 goto err_lag_init;
3479 }
3480
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003481 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3482 if (err) {
3483 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3484 goto err_switchdev_init;
3485 }
3486
Ido Schimmel464dce12016-07-02 11:00:15 +02003487 err = mlxsw_sp_router_init(mlxsw_sp);
3488 if (err) {
3489 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3490 goto err_router_init;
3491 }
3492
Yotam Gigi763b4b72016-07-21 12:03:17 +02003493 err = mlxsw_sp_span_init(mlxsw_sp);
3494 if (err) {
3495 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3496 goto err_span_init;
3497 }
3498
Jiri Pirko22a67762017-02-03 10:29:07 +01003499 err = mlxsw_sp_acl_init(mlxsw_sp);
3500 if (err) {
3501 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3502 goto err_acl_init;
3503 }
3504
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003505 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3506 if (err) {
3507 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3508 goto err_counter_pool_init;
3509 }
3510
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003511 err = mlxsw_sp_dpipe_init(mlxsw_sp);
3512 if (err) {
3513 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3514 goto err_dpipe_init;
3515 }
3516
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003517 err = mlxsw_sp_ports_create(mlxsw_sp);
3518 if (err) {
3519 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3520 goto err_ports_create;
3521 }
3522
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003523 return 0;
3524
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003525err_ports_create:
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003526 mlxsw_sp_dpipe_fini(mlxsw_sp);
3527err_dpipe_init:
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003528 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3529err_counter_pool_init:
Jiri Pirko22a67762017-02-03 10:29:07 +01003530 mlxsw_sp_acl_fini(mlxsw_sp);
3531err_acl_init:
Yotam Gigi763b4b72016-07-21 12:03:17 +02003532 mlxsw_sp_span_fini(mlxsw_sp);
3533err_span_init:
Ido Schimmel464dce12016-07-02 11:00:15 +02003534 mlxsw_sp_router_fini(mlxsw_sp);
3535err_router_init:
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003536 mlxsw_sp_switchdev_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003537err_switchdev_init:
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003538 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003539err_lag_init:
Jiri Pirko0f433fa2016-04-14 18:19:24 +02003540 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003541err_buffers_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003542 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmela1107482017-05-26 08:37:39 +02003543err_traps_init:
3544 mlxsw_sp_fids_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003545 return err;
3546}
3547
Jiri Pirkob2f10572016-04-08 19:11:23 +02003548static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003549{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003550 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003551
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003552 mlxsw_sp_ports_remove(mlxsw_sp);
Arkadi Sharshevsky230ead02017-03-28 17:24:12 +02003553 mlxsw_sp_dpipe_fini(mlxsw_sp);
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003554 mlxsw_sp_counter_pool_fini(mlxsw_sp);
Jiri Pirko22a67762017-02-03 10:29:07 +01003555 mlxsw_sp_acl_fini(mlxsw_sp);
Yotam Gigi763b4b72016-07-21 12:03:17 +02003556 mlxsw_sp_span_fini(mlxsw_sp);
Ido Schimmel464dce12016-07-02 11:00:15 +02003557 mlxsw_sp_router_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003558 mlxsw_sp_switchdev_fini(mlxsw_sp);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003559 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko5113bfd2016-05-06 22:20:59 +02003560 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003561 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmela1107482017-05-26 08:37:39 +02003562 mlxsw_sp_fids_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003563}
3564
3565static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3566 .used_max_vepa_channels = 1,
3567 .max_vepa_channels = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003568 .used_max_mid = 1,
Elad Raz53ae6282016-01-10 21:06:26 +01003569 .max_mid = MLXSW_SP_MID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003570 .used_max_pgt = 1,
3571 .max_pgt = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003572 .used_flood_tables = 1,
3573 .used_flood_mode = 1,
3574 .flood_mode = 3,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003575 .max_fid_offset_flood_tables = 3,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003576 .fid_offset_flood_table_size = VLAN_N_VID - 1,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003577 .max_fid_flood_tables = 3,
Ido Schimmela1107482017-05-26 08:37:39 +02003578 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003579 .used_max_ib_mc = 1,
3580 .max_ib_mc = 0,
3581 .used_max_pkey = 1,
3582 .max_pkey = 0,
Nogah Frankel403547d2016-09-20 11:16:52 +02003583 .used_kvd_split_data = 1,
3584 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3585 .kvd_hash_single_parts = 2,
3586 .kvd_hash_double_parts = 1,
Jiri Pirkoc6022422016-07-05 11:27:46 +02003587 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003588 .swid_config = {
3589 {
3590 .used_type = 1,
3591 .type = MLXSW_PORT_SWID_TYPE_ETH,
3592 }
3593 },
Nogah Frankel57d316b2016-07-21 12:03:09 +02003594 .resource_query_enable = 1,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003595};
3596
3597static struct mlxsw_driver mlxsw_sp_driver = {
Jiri Pirko1d20d232016-10-27 15:12:59 +02003598 .kind = mlxsw_sp_driver_name,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003599 .priv_size = sizeof(struct mlxsw_sp),
3600 .init = mlxsw_sp_init,
3601 .fini = mlxsw_sp_fini,
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003602 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003603 .port_split = mlxsw_sp_port_split,
3604 .port_unsplit = mlxsw_sp_port_unsplit,
3605 .sb_pool_get = mlxsw_sp_sb_pool_get,
3606 .sb_pool_set = mlxsw_sp_sb_pool_set,
3607 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3608 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3609 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3610 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3611 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3612 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3613 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3614 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3615 .txhdr_construct = mlxsw_sp_txhdr_construct,
3616 .txhdr_len = MLXSW_TXHDR_LEN,
3617 .profile = &mlxsw_sp_config_profile,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003618};
3619
Jiri Pirko22a67762017-02-03 10:29:07 +01003620bool mlxsw_sp_port_dev_check(const struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003621{
3622 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3623}
3624
Jiri Pirko1182e532017-03-06 21:25:20 +01003625static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
David Aherndd823642016-10-17 19:15:49 -07003626{
Jiri Pirko1182e532017-03-06 21:25:20 +01003627 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
David Aherndd823642016-10-17 19:15:49 -07003628 int ret = 0;
3629
3630 if (mlxsw_sp_port_dev_check(lower_dev)) {
Jiri Pirko1182e532017-03-06 21:25:20 +01003631 *p_mlxsw_sp_port = netdev_priv(lower_dev);
David Aherndd823642016-10-17 19:15:49 -07003632 ret = 1;
3633 }
3634
3635 return ret;
3636}
3637
Ido Schimmelc57529e2017-05-26 08:37:31 +02003638struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003639{
Jiri Pirko1182e532017-03-06 21:25:20 +01003640 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003641
3642 if (mlxsw_sp_port_dev_check(dev))
3643 return netdev_priv(dev);
3644
Jiri Pirko1182e532017-03-06 21:25:20 +01003645 mlxsw_sp_port = NULL;
3646 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003647
Jiri Pirko1182e532017-03-06 21:25:20 +01003648 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003649}
3650
Ido Schimmel4724ba562017-03-10 08:53:39 +01003651struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003652{
3653 struct mlxsw_sp_port *mlxsw_sp_port;
3654
3655 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3656 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3657}
3658
Arkadi Sharshevskyaf0613782017-06-08 08:44:20 +02003659struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003660{
Jiri Pirko1182e532017-03-06 21:25:20 +01003661 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003662
3663 if (mlxsw_sp_port_dev_check(dev))
3664 return netdev_priv(dev);
3665
Jiri Pirko1182e532017-03-06 21:25:20 +01003666 mlxsw_sp_port = NULL;
3667 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3668 &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003669
Jiri Pirko1182e532017-03-06 21:25:20 +01003670 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003671}
3672
3673struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3674{
3675 struct mlxsw_sp_port *mlxsw_sp_port;
3676
3677 rcu_read_lock();
3678 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3679 if (mlxsw_sp_port)
3680 dev_hold(mlxsw_sp_port->dev);
3681 rcu_read_unlock();
3682 return mlxsw_sp_port;
3683}
3684
3685void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3686{
3687 dev_put(mlxsw_sp_port->dev);
3688}
3689
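/* Thin wrappers around the SLDR and SLCOR registers used to create and
 * destroy LAGs and to add, remove, enable and disable collector ports.
 */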
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003690static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003691{
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003692 char sldr_pl[MLXSW_REG_SLDR_LEN];
3693
3694 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3695 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3696}
3697
3698static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3699{
3700 char sldr_pl[MLXSW_REG_SLDR_LEN];
3701
3702 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3703 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3704}
3705
3706static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3707 u16 lag_id, u8 port_index)
3708{
3709 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3710 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3711
3712 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3713 lag_id, port_index);
3714 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3715}
3716
3717static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3718 u16 lag_id)
3719{
3720 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3721 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3722
3723 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3724 lag_id);
3725 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3726}
3727
3728static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3729 u16 lag_id)
3730{
3731 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3732 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3733
3734 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3735 lag_id);
3736 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3737}
3738
3739static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3740 u16 lag_id)
3741{
3742 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3743 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3744
3745 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3746 lag_id);
3747 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3748}
3749
3750static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3751 struct net_device *lag_dev,
3752 u16 *p_lag_id)
3753{
3754 struct mlxsw_sp_upper *lag;
3755 int free_lag_id = -1;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003756 u64 max_lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003757 int i;
3758
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003759 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3760 for (i = 0; i < max_lag; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003761 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3762 if (lag->ref_count) {
3763 if (lag->dev == lag_dev) {
3764 *p_lag_id = i;
3765 return 0;
3766 }
3767 } else if (free_lag_id < 0) {
3768 free_lag_id = i;
3769 }
3770 }
3771 if (free_lag_id < 0)
3772 return -EBUSY;
3773 *p_lag_id = free_lag_id;
3774 return 0;
3775}
3776
3777static bool
3778mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3779 struct net_device *lag_dev,
3780 struct netdev_lag_upper_info *lag_upper_info)
3781{
3782 u16 lag_id;
3783
3784 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3785 return false;
3786 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3787 return false;
3788 return true;
3789}
3790
3791static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3792 u16 lag_id, u8 *p_port_index)
3793{
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003794 u64 max_lag_members;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003795 int i;
3796
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003797 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3798 MAX_LAG_MEMBERS);
3799 for (i = 0; i < max_lag_members; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003800 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3801 *p_port_index = i;
3802 return 0;
3803 }
3804 }
3805 return -EBUSY;
3806}
3807
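/* Enslave a port to a LAG device: find or allocate a LAG ID, create the
 * LAG in hardware for its first member, pick a free port index and add
 * the port as an enabled collector port. VLAN 1 of the port stops acting
 * as a router interface once the port is lagged.
 */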
3808static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3809 struct net_device *lag_dev)
3810{
3811 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmelc57529e2017-05-26 08:37:31 +02003812 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003813 struct mlxsw_sp_upper *lag;
3814 u16 lag_id;
3815 u8 port_index;
3816 int err;
3817
3818 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3819 if (err)
3820 return err;
3821 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3822 if (!lag->ref_count) {
3823 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3824 if (err)
3825 return err;
3826 lag->dev = lag_dev;
3827 }
3828
3829 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3830 if (err)
3831 return err;
3832 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3833 if (err)
3834 goto err_col_port_add;
3835 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3836 if (err)
3837 goto err_col_port_enable;
3838
3839 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3840 mlxsw_sp_port->local_port);
3841 mlxsw_sp_port->lag_id = lag_id;
3842 mlxsw_sp_port->lagged = 1;
3843 lag->ref_count++;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003844
Ido Schimmelc57529e2017-05-26 08:37:31 +02003845 /* Port is no longer usable as a router interface */
3846 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
3847 if (mlxsw_sp_port_vlan->fid)
Ido Schimmela1107482017-05-26 08:37:39 +02003848 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003849
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003850 return 0;
3851
Ido Schimmel51554db2016-05-06 22:18:39 +02003852err_col_port_enable:
3853 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003854err_col_port_add:
3855 if (!lag->ref_count)
3856 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003857 return err;
3858}
3859
Ido Schimmel82e6db02016-06-20 23:04:04 +02003860static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3861 struct net_device *lag_dev)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003862{
3863 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003864 u16 lag_id = mlxsw_sp_port->lag_id;
Ido Schimmel1c800752016-06-20 23:04:20 +02003865 struct mlxsw_sp_upper *lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003866
3867 if (!mlxsw_sp_port->lagged)
Ido Schimmel82e6db02016-06-20 23:04:04 +02003868 return;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003869 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3870 WARN_ON(lag->ref_count == 0);
3871
Ido Schimmel82e6db02016-06-20 23:04:04 +02003872 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
3873 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003874
Ido Schimmelc57529e2017-05-26 08:37:31 +02003875 /* Any VLANs configured on the port are no longer valid */
3876 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01003877
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003878 if (lag->ref_count == 1)
Ido Schimmel82e6db02016-06-20 23:04:04 +02003879 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003880
3881 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3882 mlxsw_sp_port->local_port);
3883 mlxsw_sp_port->lagged = 0;
3884 lag->ref_count--;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003885
Ido Schimmelc57529e2017-05-26 08:37:31 +02003886 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
3887 /* Make sure untagged frames are allowed to ingress */
3888 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003889}
3890
Jiri Pirko74581202015-12-03 12:12:30 +01003891static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3892 u16 lag_id)
3893{
3894 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3895 char sldr_pl[MLXSW_REG_SLDR_LEN];
3896
3897 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3898 mlxsw_sp_port->local_port);
3899 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3900}
3901
3902static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3903 u16 lag_id)
3904{
3905 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3906 char sldr_pl[MLXSW_REG_SLDR_LEN];
3907
3908 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3909 mlxsw_sp_port->local_port);
3910 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3911}
3912
3913static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3914 bool lag_tx_enabled)
3915{
3916 if (lag_tx_enabled)
3917 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3918 mlxsw_sp_port->lag_id);
3919 else
3920 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3921 mlxsw_sp_port->lag_id);
3922}
3923
3924static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3925 struct netdev_lag_lower_state_info *info)
3926{
3927 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3928}
3929
Jiri Pirko2b94e582017-04-18 16:55:37 +02003930static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
3931 bool enable)
3932{
3933 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3934 enum mlxsw_reg_spms_state spms_state;
3935 char *spms_pl;
3936 u16 vid;
3937 int err;
3938
3939 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
3940 MLXSW_REG_SPMS_STATE_DISCARDING;
3941
3942 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
3943 if (!spms_pl)
3944 return -ENOMEM;
3945 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
3946
3947 for (vid = 0; vid < VLAN_N_VID; vid++)
3948 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
3949
3950 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
3951 kfree(spms_pl);
3952 return err;
3953}
3954
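/* Prepare a port for enslavement to an OVS master: switch it to virtual
 * port mode, set all VLANs to the forwarding STP state and add the port
 * as a tagged member of VLANs 2-4094. mlxsw_sp_port_ovs_leave() undoes
 * these steps in reverse order.
 */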
3955static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
3956{
3957 int err;
3958
Ido Schimmel4aafc362017-05-26 08:37:25 +02003959 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
Jiri Pirko2b94e582017-04-18 16:55:37 +02003960 if (err)
3961 return err;
Ido Schimmel4aafc362017-05-26 08:37:25 +02003962 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
3963 if (err)
3964 goto err_port_stp_set;
Jiri Pirko2b94e582017-04-18 16:55:37 +02003965 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
3966 true, false);
3967 if (err)
3968 goto err_port_vlan_set;
3969 return 0;
3970
3971err_port_vlan_set:
3972 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
Ido Schimmel4aafc362017-05-26 08:37:25 +02003973err_port_stp_set:
3974 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
Jiri Pirko2b94e582017-04-18 16:55:37 +02003975 return err;
3976}
3977
3978static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3979{
3980 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
3981 false, false);
3982 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
Ido Schimmel4aafc362017-05-26 08:37:25 +02003983 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
Jiri Pirko2b94e582017-04-18 16:55:37 +02003984}
3985
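/* Handle changes of a port's upper device. PRECHANGEUPPER vetoes
 * topologies that cannot be offloaded (for example, enslaving a port to
 * a LAG while it still has VLAN uppers), while CHANGEUPPER reflects the
 * port joining or leaving a bridge, a LAG or an OVS datapath in
 * hardware.
 */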
Ido Schimmelf0cebd82017-05-26 08:37:29 +02003986static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
3987 struct net_device *dev,
Jiri Pirko74581202015-12-03 12:12:30 +01003988 unsigned long event, void *ptr)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003989{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003990 struct netdev_notifier_changeupper_info *info;
3991 struct mlxsw_sp_port *mlxsw_sp_port;
3992 struct net_device *upper_dev;
3993 struct mlxsw_sp *mlxsw_sp;
Ido Schimmel80bedf12016-06-20 23:03:59 +02003994 int err = 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003995
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003996 mlxsw_sp_port = netdev_priv(dev);
3997 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3998 info = ptr;
3999
4000 switch (event) {
4001 case NETDEV_PRECHANGEUPPER:
4002 upper_dev = info->upper_dev;
Ido Schimmel59fe9b32016-06-20 23:04:00 +02004003 if (!is_vlan_dev(upper_dev) &&
4004 !netif_is_lag_master(upper_dev) &&
Ido Schimmel7179eb52017-03-16 09:08:18 +01004005 !netif_is_bridge_master(upper_dev) &&
Jiri Pirko2b94e582017-04-18 16:55:37 +02004006 !netif_is_ovs_master(upper_dev))
Ido Schimmel59fe9b32016-06-20 23:04:00 +02004007 return -EINVAL;
Ido Schimmel6ec43902016-06-20 23:04:01 +02004008 if (!info->linking)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004009 break;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004010 if (netif_is_lag_master(upper_dev) &&
4011 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4012 info->upper_info))
Ido Schimmel80bedf12016-06-20 23:03:59 +02004013 return -EINVAL;
Ido Schimmel6ec43902016-06-20 23:04:01 +02004014 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
4015 return -EINVAL;
4016 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4017 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
4018 return -EINVAL;
Jiri Pirko2b94e582017-04-18 16:55:37 +02004019 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
4020 return -EINVAL;
4021 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
4022 return -EINVAL;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004023 break;
4024 case NETDEV_CHANGEUPPER:
4025 upper_dev = info->upper_dev;
Ido Schimmelc57529e2017-05-26 08:37:31 +02004026 if (netif_is_bridge_master(upper_dev)) {
Ido Schimmel7117a572016-06-20 23:04:06 +02004027 if (info->linking)
4028 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004029 lower_dev,
Ido Schimmel7117a572016-06-20 23:04:06 +02004030 upper_dev);
4031 else
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004032 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4033 lower_dev,
4034 upper_dev);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004035 } else if (netif_is_lag_master(upper_dev)) {
Ido Schimmel80bedf12016-06-20 23:03:59 +02004036 if (info->linking)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004037 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4038 upper_dev);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004039 else
Ido Schimmel82e6db02016-06-20 23:04:04 +02004040 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4041 upper_dev);
Jiri Pirko2b94e582017-04-18 16:55:37 +02004042 } else if (netif_is_ovs_master(upper_dev)) {
4043 if (info->linking)
4044 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
4045 else
4046 mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004047 }
4048 break;
4049 }
4050
Ido Schimmel80bedf12016-06-20 23:03:59 +02004051 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004052}
4053
Jiri Pirko74581202015-12-03 12:12:30 +01004054static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4055 unsigned long event, void *ptr)
4056{
4057 struct netdev_notifier_changelowerstate_info *info;
4058 struct mlxsw_sp_port *mlxsw_sp_port;
4059 int err;
4060
4061 mlxsw_sp_port = netdev_priv(dev);
4062 info = ptr;
4063
4064 switch (event) {
4065 case NETDEV_CHANGELOWERSTATE:
4066 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4067 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4068 info->lower_state_info);
4069 if (err)
4070 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4071 }
4072 break;
4073 }
4074
Ido Schimmel80bedf12016-06-20 23:03:59 +02004075 return 0;
Jiri Pirko74581202015-12-03 12:12:30 +01004076}
4077
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004078static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4079 struct net_device *port_dev,
Jiri Pirko74581202015-12-03 12:12:30 +01004080 unsigned long event, void *ptr)
4081{
4082 switch (event) {
4083 case NETDEV_PRECHANGEUPPER:
4084 case NETDEV_CHANGEUPPER:
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004085 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4086 event, ptr);
Jiri Pirko74581202015-12-03 12:12:30 +01004087 case NETDEV_CHANGELOWERSTATE:
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004088 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4089 ptr);
Jiri Pirko74581202015-12-03 12:12:30 +01004090 }
4091
Ido Schimmel80bedf12016-06-20 23:03:59 +02004092 return 0;
Jiri Pirko74581202015-12-03 12:12:30 +01004093}
4094
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004095static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4096 unsigned long event, void *ptr)
4097{
4098 struct net_device *dev;
4099 struct list_head *iter;
4100 int ret;
4101
4102 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4103 if (mlxsw_sp_port_dev_check(dev)) {
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004104 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4105 ptr);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004106 if (ret)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004107 return ret;
4108 }
4109 }
4110
Ido Schimmel80bedf12016-06-20 23:03:59 +02004111 return 0;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004112}
4113
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004114static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4115 struct net_device *dev,
4116 unsigned long event, void *ptr,
4117 u16 vid)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004118{
4119 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4120 struct netdev_notifier_changeupper_info *info = ptr;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004121 struct net_device *upper_dev;
Ido Schimmel80bedf12016-06-20 23:03:59 +02004122 int err = 0;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004123
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004124 switch (event) {
4125 case NETDEV_PRECHANGEUPPER:
4126 upper_dev = info->upper_dev;
Ido Schimmelb1e45522017-04-30 19:47:14 +03004127 if (!netif_is_bridge_master(upper_dev))
Ido Schimmel80bedf12016-06-20 23:03:59 +02004128 return -EINVAL;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004129 break;
4130 case NETDEV_CHANGEUPPER:
4131 upper_dev = info->upper_dev;
Ido Schimmel1f880612017-03-10 08:53:35 +01004132 if (netif_is_bridge_master(upper_dev)) {
4133 if (info->linking)
Ido Schimmelc57529e2017-05-26 08:37:31 +02004134 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4135 vlan_dev,
4136 upper_dev);
Ido Schimmel1f880612017-03-10 08:53:35 +01004137 else
Ido Schimmelc57529e2017-05-26 08:37:31 +02004138 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4139 vlan_dev,
4140 upper_dev);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004141 } else {
Ido Schimmel1f880612017-03-10 08:53:35 +01004142 err = -EINVAL;
4143 WARN_ON(1);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004144 }
Ido Schimmel1f880612017-03-10 08:53:35 +01004145 break;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004146 }
4147
Ido Schimmel80bedf12016-06-20 23:03:59 +02004148 return err;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004149}
4150
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004151static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4152 struct net_device *lag_dev,
4153 unsigned long event,
4154 void *ptr, u16 vid)
Ido Schimmel272c4472015-12-15 16:03:47 +01004155{
4156 struct net_device *dev;
4157 struct list_head *iter;
4158 int ret;
4159
4160 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4161 if (mlxsw_sp_port_dev_check(dev)) {
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004162 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4163 event, ptr,
4164 vid);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004165 if (ret)
Ido Schimmel272c4472015-12-15 16:03:47 +01004166 return ret;
4167 }
4168 }
4169
Ido Schimmel80bedf12016-06-20 23:03:59 +02004170 return 0;
Ido Schimmel272c4472015-12-15 16:03:47 +01004171}
4172
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004173static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4174 unsigned long event, void *ptr)
4175{
4176 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4177 u16 vid = vlan_dev_vlan_id(vlan_dev);
4178
Ido Schimmel272c4472015-12-15 16:03:47 +01004179 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004180 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4181 event, ptr, vid);
Ido Schimmel272c4472015-12-15 16:03:47 +01004182 else if (netif_is_lag_master(real_dev))
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004183 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4184 real_dev, event,
4185 ptr, vid);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004186
Ido Schimmel80bedf12016-06-20 23:03:59 +02004187 return 0;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004188}
4189
Ido Schimmelb1e45522017-04-30 19:47:14 +03004190static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4191{
4192 struct netdev_notifier_changeupper_info *info = ptr;
4193
4194 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4195 return false;
4196 return netif_is_l3_master(info->upper_dev);
4197}
4198
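/* Top-level netdevice notifier: router port events (MTU and address
 * changes) and VRF enslavement are handled first, then the event is
 * dispatched according to the device type: physical port, LAG master or
 * VLAN device.
 */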
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004199static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4200 unsigned long event, void *ptr)
4201{
4202 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004203 int err = 0;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004204
Ido Schimmel6e095fd2016-07-04 08:23:13 +02004205 if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4206 err = mlxsw_sp_netdevice_router_port_event(dev);
Ido Schimmelb1e45522017-04-30 19:47:14 +03004207 else if (mlxsw_sp_is_vrf_event(event, ptr))
4208 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
Ido Schimmel6e095fd2016-07-04 08:23:13 +02004209 else if (mlxsw_sp_port_dev_check(dev))
Ido Schimmelf0cebd82017-05-26 08:37:29 +02004210 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004211 else if (netif_is_lag_master(dev))
4212 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4213 else if (is_vlan_dev(dev))
4214 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004215
Ido Schimmel80bedf12016-06-20 23:03:59 +02004216 return notifier_from_errno(err);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004217}
4218
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004219static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
4220 .notifier_call = mlxsw_sp_netdevice_event,
4221};
4222
Ido Schimmel99724c12016-07-04 08:23:14 +02004223static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
4224 .notifier_call = mlxsw_sp_inetaddr_event,
4225 .priority = 10, /* Must be called before FIB notifier block */
4226};
4227
Jiri Pirkoe7322632016-09-01 10:37:43 +02004228static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
4229 .notifier_call = mlxsw_sp_router_netevent_event,
4230};
4231
Jiri Pirko1d20d232016-10-27 15:12:59 +02004232static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
4233 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4234 {0, },
4235};
4236
4237static struct pci_driver mlxsw_sp_pci_driver = {
4238 .name = mlxsw_sp_driver_name,
4239 .id_table = mlxsw_sp_pci_id_table,
4240};
4241
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004242static int __init mlxsw_sp_module_init(void)
4243{
4244 int err;
4245
4246 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
Ido Schimmel99724c12016-07-04 08:23:14 +02004247 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
Jiri Pirkoe7322632016-09-01 10:37:43 +02004248 register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4249
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004250 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4251 if (err)
4252 goto err_core_driver_register;
Jiri Pirko1d20d232016-10-27 15:12:59 +02004253
4254 err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
4255 if (err)
4256 goto err_pci_driver_register;
4257
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004258 return 0;
4259
Jiri Pirko1d20d232016-10-27 15:12:59 +02004260err_pci_driver_register:
4261 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004262err_core_driver_register:
Jiri Pirkoe7322632016-09-01 10:37:43 +02004263 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
Jiri Pirkode7d6292016-09-01 10:37:42 +02004264 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004265 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4266 return err;
4267}
4268
4269static void __exit mlxsw_sp_module_exit(void)
4270{
Jiri Pirko1d20d232016-10-27 15:12:59 +02004271 mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004272 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
Jiri Pirkoe7322632016-09-01 10:37:43 +02004273 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
Ido Schimmel99724c12016-07-04 08:23:14 +02004274 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004275 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4276}
4277
4278module_init(mlxsw_sp_module_init);
4279module_exit(mlxsw_sp_module_exit);
4280
4281MODULE_LICENSE("Dual BSD/GPL");
4282MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4283MODULE_DESCRIPTION("Mellanox Spectrum driver");
Jiri Pirko1d20d232016-10-27 15:12:59 +02004284MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
Yotam Gigi6b742192017-05-23 21:56:29 +02004285MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);