Ido Schimmel4ec14b72015-07-29 23:33:48 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/reg.h
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
Ido Schimmel69c407a2016-07-02 11:00:13 +02004 * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
Ido Schimmel4ec14b72015-07-29 23:33:48 +02005 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
Jiri Pirko6f9fc3c2016-07-04 08:23:05 +02006 * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
Yotam Gigi4457b3df2016-07-05 11:27:40 +02007 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Ido Schimmel4ec14b72015-07-29 23:33:48 +02008 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * Alternatively, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") version 2 as published by the Free
23 * Software Foundation.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#ifndef _MLXSW_REG_H
39#define _MLXSW_REG_H
40
41#include <linux/string.h>
42#include <linux/bitops.h>
43#include <linux/if_vlan.h>
44
45#include "item.h"
46#include "port.h"
47
48struct mlxsw_reg_info {
49 u16 id;
50 u16 len; /* In u8 */
51};
52
53#define MLXSW_REG(type) (&mlxsw_reg_##type)
54#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
55#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
56
57/* SGCR - Switch General Configuration Register
58 * --------------------------------------------
59 * This register is used for configuration of the switch capabilities.
60 */
61#define MLXSW_REG_SGCR_ID 0x2000
62#define MLXSW_REG_SGCR_LEN 0x10
63
64static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
65 .id = MLXSW_REG_SGCR_ID,
66 .len = MLXSW_REG_SGCR_LEN,
67};
68
69/* reg_sgcr_llb
70 * Link Local Broadcast (Default=0)
71 * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
 72 * packets and will ignore the IGMP snooping entries.
73 * Access: RW
74 */
75MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
76
77static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
78{
79 MLXSW_REG_ZERO(sgcr, payload);
80 mlxsw_reg_sgcr_llb_set(payload, !!llb);
81}
82
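/* Usage sketch for the payload convention above: callers keep the payload on
 * the stack, sized by the register's _LEN define, pack it with the helper and
 * hand it to the core together with MLXSW_REG(sgcr). mlxsw_reg_write() is
 * assumed here to be the core's register-write helper from core.h.
 *
 *	char sgcr_pl[MLXSW_REG_SGCR_LEN];
 *	int err;
 *
 *	mlxsw_reg_sgcr_pack(sgcr_pl, true);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), sgcr_pl);
 *	if (err)
 *		return err;
 */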
83/* SPAD - Switch Physical Address Register
84 * ---------------------------------------
85 * The SPAD register configures the switch physical MAC address.
86 */
87#define MLXSW_REG_SPAD_ID 0x2002
88#define MLXSW_REG_SPAD_LEN 0x10
89
90static const struct mlxsw_reg_info mlxsw_reg_spad = {
91 .id = MLXSW_REG_SPAD_ID,
92 .len = MLXSW_REG_SPAD_LEN,
93};
94
95/* reg_spad_base_mac
96 * Base MAC address for the switch partitions.
97 * Per switch partition MAC address is equal to:
98 * base_mac + swid
99 * Access: RW
100 */
101MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
102
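/* Usage sketch: the base MAC is typically read once at init time with a query
 * operation; mlxsw_reg_query() is assumed to be the core's register-read
 * helper, and the 6-byte destination buffer matches the item size above.
 *
 *	char spad_pl[MLXSW_REG_SPAD_LEN];
 *	char base_mac[ETH_ALEN];
 *	int err;
 *
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(spad), spad_pl);
 *	if (err)
 *		return err;
 *	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, base_mac);
 */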
Elad Razfabe5482016-01-10 21:06:25 +0100103/* SMID - Switch Multicast ID
104 * --------------------------
105 * The MID record maps from a MID (Multicast ID), which is a unique identifier
106 * of the multicast group within the stacking domain, into a list of local
107 * ports into which the packet is replicated.
108 */
109#define MLXSW_REG_SMID_ID 0x2007
110#define MLXSW_REG_SMID_LEN 0x240
111
112static const struct mlxsw_reg_info mlxsw_reg_smid = {
113 .id = MLXSW_REG_SMID_ID,
114 .len = MLXSW_REG_SMID_LEN,
115};
116
117/* reg_smid_swid
118 * Switch partition ID.
119 * Access: Index
120 */
121MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
122
123/* reg_smid_mid
124 * Multicast identifier - global identifier that represents the multicast group
125 * across all devices.
126 * Access: Index
127 */
128MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
129
130/* reg_smid_port
 131 * Local port membership (1 bit per port).
132 * Access: RW
133 */
134MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
135
136/* reg_smid_port_mask
137 * Local port mask (1 bit per port).
138 * Access: W
139 */
140MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
141
142static inline void mlxsw_reg_smid_pack(char *payload, u16 mid,
143 u8 port, bool set)
144{
145 MLXSW_REG_ZERO(smid, payload);
146 mlxsw_reg_smid_swid_set(payload, 0);
147 mlxsw_reg_smid_mid_set(payload, mid);
148 mlxsw_reg_smid_port_set(payload, port, set);
149 mlxsw_reg_smid_port_mask_set(payload, port, 1);
150}
151
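/* Usage sketch: to add local port 5 to multicast group (MID) 0x10, pack the
 * record with set=true and write it; set=false clears the port, while the
 * port_mask bit keeps the membership of other ports untouched. MID and port
 * values are placeholders; mlxsw_reg_write() is assumed as above.
 *
 *	char smid_pl[MLXSW_REG_SMID_LEN];
 *	int err;
 *
 *	mlxsw_reg_smid_pack(smid_pl, 0x10, 5, true);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(smid), smid_pl);
 */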
Ido Schimmele61011b2015-08-06 16:41:53 +0200152/* SSPR - Switch System Port Record Register
153 * -----------------------------------------
154 * Configures the system port to local port mapping.
155 */
156#define MLXSW_REG_SSPR_ID 0x2008
157#define MLXSW_REG_SSPR_LEN 0x8
158
159static const struct mlxsw_reg_info mlxsw_reg_sspr = {
160 .id = MLXSW_REG_SSPR_ID,
161 .len = MLXSW_REG_SSPR_LEN,
162};
163
164/* reg_sspr_m
165 * Master - if set, then the record describes the master system port.
166 * This is needed in case a local port is mapped into several system ports
167 * (for multipathing). That number will be reported as the source system
168 * port when packets are forwarded to the CPU. Only one master port is allowed
169 * per local port.
170 *
171 * Note: Must be set for Spectrum.
172 * Access: RW
173 */
174MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
175
176/* reg_sspr_local_port
177 * Local port number.
178 *
179 * Access: RW
180 */
181MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
182
183/* reg_sspr_sub_port
184 * Virtual port within the physical port.
185 * Should be set to 0 when virtual ports are not enabled on the port.
186 *
187 * Access: RW
188 */
189MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
190
191/* reg_sspr_system_port
192 * Unique identifier within the stacking domain that represents all the ports
193 * that are available in the system (external ports).
194 *
195 * Currently, only single-ASIC configurations are supported, so we default to
196 * 1:1 mapping between system ports and local ports.
197 * Access: Index
198 */
199MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
200
201static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
202{
203 MLXSW_REG_ZERO(sspr, payload);
204 mlxsw_reg_sspr_m_set(payload, 1);
205 mlxsw_reg_sspr_local_port_set(payload, local_port);
206 mlxsw_reg_sspr_sub_port_set(payload, 0);
207 mlxsw_reg_sspr_system_port_set(payload, local_port);
208}
209
Jiri Pirkoe534a56a2015-10-16 14:01:35 +0200210/* SFDAT - Switch Filtering Database Aging Time
211 * --------------------------------------------
 212 * Controls the switch aging time. The aging time can be set per switch
 213 * partition.
214 */
215#define MLXSW_REG_SFDAT_ID 0x2009
216#define MLXSW_REG_SFDAT_LEN 0x8
217
218static const struct mlxsw_reg_info mlxsw_reg_sfdat = {
219 .id = MLXSW_REG_SFDAT_ID,
220 .len = MLXSW_REG_SFDAT_LEN,
221};
222
223/* reg_sfdat_swid
224 * Switch partition ID.
225 * Access: Index
226 */
227MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8);
228
229/* reg_sfdat_age_time
230 * Aging time in seconds
231 * Min - 10 seconds
232 * Max - 1,000,000 seconds
233 * Default is 300 seconds.
234 * Access: RW
235 */
236MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20);
237
238static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time)
239{
240 MLXSW_REG_ZERO(sfdat, payload);
241 mlxsw_reg_sfdat_swid_set(payload, 0);
242 mlxsw_reg_sfdat_age_time_set(payload, age_time);
243}
244
Jiri Pirko236033b2015-10-16 14:01:28 +0200245/* SFD - Switch Filtering Database
246 * -------------------------------
247 * The following register defines the access to the filtering database.
248 * The register supports querying, adding, removing and modifying the database.
249 * The access is optimized for bulk updates in which case more than one
250 * FDB record is present in the same command.
251 */
252#define MLXSW_REG_SFD_ID 0x200A
253#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */
254#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */
255#define MLXSW_REG_SFD_REC_MAX_COUNT 64
256#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \
257 MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT)
258
259static const struct mlxsw_reg_info mlxsw_reg_sfd = {
260 .id = MLXSW_REG_SFD_ID,
261 .len = MLXSW_REG_SFD_LEN,
262};
263
264/* reg_sfd_swid
265 * Switch partition ID for queries. Reserved on Write.
266 * Access: Index
267 */
268MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8);
269
270enum mlxsw_reg_sfd_op {
 271 /* Dump entire FDB (process according to record_locator) */
272 MLXSW_REG_SFD_OP_QUERY_DUMP = 0,
273 /* Query records by {MAC, VID/FID} value */
274 MLXSW_REG_SFD_OP_QUERY_QUERY = 1,
275 /* Query and clear activity. Query records by {MAC, VID/FID} value */
276 MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2,
277 /* Test. Response indicates if each of the records could be
278 * added to the FDB.
279 */
280 MLXSW_REG_SFD_OP_WRITE_TEST = 0,
281 /* Add/modify. Aged-out records cannot be added. This command removes
282 * the learning notification of the {MAC, VID/FID}. Response includes
283 * the entries that were added to the FDB.
284 */
285 MLXSW_REG_SFD_OP_WRITE_EDIT = 1,
286 /* Remove record by {MAC, VID/FID}. This command also removes
287 * the learning notification and aged-out notifications
288 * of the {MAC, VID/FID}. The response provides current (pre-removal)
289 * entries as non-aged-out.
290 */
291 MLXSW_REG_SFD_OP_WRITE_REMOVE = 2,
292 /* Remove learned notification by {MAC, VID/FID}. The response provides
293 * the removed learning notification.
294 */
295 MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2,
296};
297
298/* reg_sfd_op
299 * Operation.
300 * Access: OP
301 */
302MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2);
303
304/* reg_sfd_record_locator
305 * Used for querying the FDB. Use record_locator=0 to initiate the
306 * query. When a record is returned, a new record_locator is
307 * returned to be used in the subsequent query.
308 * Reserved for database update.
309 * Access: Index
310 */
311MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30);
312
313/* reg_sfd_num_rec
314 * Request: Number of records to read/add/modify/remove
315 * Response: Number of records read/added/replaced/removed
316 * See above description for more details.
317 * Ranges 0..64
318 * Access: RW
319 */
320MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8);
321
322static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op,
323 u32 record_locator)
324{
325 MLXSW_REG_ZERO(sfd, payload);
326 mlxsw_reg_sfd_op_set(payload, op);
327 mlxsw_reg_sfd_record_locator_set(payload, record_locator);
328}
329
330/* reg_sfd_rec_swid
331 * Switch partition ID.
332 * Access: Index
333 */
334MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8,
335 MLXSW_REG_SFD_REC_LEN, 0x00, false);
336
337enum mlxsw_reg_sfd_rec_type {
338 MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100339 MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1,
Elad Raz5230b252016-01-10 21:06:24 +0100340 MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2,
Jiri Pirko236033b2015-10-16 14:01:28 +0200341};
342
343/* reg_sfd_rec_type
344 * FDB record type.
345 * Access: RW
346 */
347MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4,
348 MLXSW_REG_SFD_REC_LEN, 0x00, false);
349
350enum mlxsw_reg_sfd_rec_policy {
351 /* Replacement disabled, aging disabled. */
352 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0,
353 /* (mlag remote): Replacement enabled, aging disabled,
354 * learning notification enabled on this port.
355 */
356 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1,
357 /* (ingress device): Replacement enabled, aging enabled. */
358 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3,
359};
360
361/* reg_sfd_rec_policy
362 * Policy.
363 * Access: RW
364 */
365MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2,
366 MLXSW_REG_SFD_REC_LEN, 0x00, false);
367
368/* reg_sfd_rec_a
369 * Activity. Set for new static entries. Set for static entries if a frame SMAC
370 * lookup hits on the entry.
371 * To clear the a bit, use "query and clear activity" op.
372 * Access: RO
373 */
374MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1,
375 MLXSW_REG_SFD_REC_LEN, 0x00, false);
376
377/* reg_sfd_rec_mac
378 * MAC address.
379 * Access: Index
380 */
381MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6,
382 MLXSW_REG_SFD_REC_LEN, 0x02);
383
384enum mlxsw_reg_sfd_rec_action {
385 /* forward */
386 MLXSW_REG_SFD_REC_ACTION_NOP = 0,
387 /* forward and trap, trap_id is FDB_TRAP */
388 MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
389 /* trap and do not forward, trap_id is FDB_TRAP */
Ido Schimmeld82d8c02016-07-02 11:00:17 +0200390 MLXSW_REG_SFD_REC_ACTION_TRAP = 2,
391 /* forward to IP router */
392 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3,
Jiri Pirko236033b2015-10-16 14:01:28 +0200393 MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
394};
395
396/* reg_sfd_rec_action
397 * Action to apply on the packet.
398 * Note: Dynamic entries can only be configured with NOP action.
399 * Access: RW
400 */
401MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
402 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
403
404/* reg_sfd_uc_sub_port
Jiri Pirko4e9ec082015-10-28 10:16:59 +0100405 * VEPA channel on local port.
406 * Valid only if local port is a non-stacking port. Must be 0 if multichannel
407 * VEPA is not enabled.
Jiri Pirko236033b2015-10-16 14:01:28 +0200408 * Access: RW
409 */
410MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
411 MLXSW_REG_SFD_REC_LEN, 0x08, false);
412
413/* reg_sfd_uc_fid_vid
414 * Filtering ID or VLAN ID
415 * For SwitchX and SwitchX-2:
416 * - Dynamic entries (policy 2,3) use FID
417 * - Static entries (policy 0) use VID
418 * - When independent learning is configured, VID=FID
419 * For Spectrum: use FID for both Dynamic and Static entries.
420 * VID should not be used.
421 * Access: Index
422 */
423MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
424 MLXSW_REG_SFD_REC_LEN, 0x08, false);
425
426/* reg_sfd_uc_system_port
427 * Unique port identifier for the final destination of the packet.
428 * Access: RW
429 */
430MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16,
431 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
432
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100433static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
434 enum mlxsw_reg_sfd_rec_type rec_type,
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100435 const char *mac,
436 enum mlxsw_reg_sfd_rec_action action)
Jiri Pirko236033b2015-10-16 14:01:28 +0200437{
438 u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload);
439
440 if (rec_index >= num_rec)
441 mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1);
442 mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0);
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100443 mlxsw_reg_sfd_rec_type_set(payload, rec_index, rec_type);
Jiri Pirko236033b2015-10-16 14:01:28 +0200444 mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac);
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100445 mlxsw_reg_sfd_rec_action_set(payload, rec_index, action);
446}
447
448static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
449 enum mlxsw_reg_sfd_rec_policy policy,
Ido Schimmel9de6a802015-12-15 16:03:40 +0100450 const char *mac, u16 fid_vid,
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100451 enum mlxsw_reg_sfd_rec_action action,
452 u8 local_port)
453{
454 mlxsw_reg_sfd_rec_pack(payload, rec_index,
Elad Raz5230b252016-01-10 21:06:24 +0100455 MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action);
456 mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
Jiri Pirko236033b2015-10-16 14:01:28 +0200457 mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
Ido Schimmel9de6a802015-12-15 16:03:40 +0100458 mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
Jiri Pirko236033b2015-10-16 14:01:28 +0200459 mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
460}
461
Jiri Pirko75c09282015-10-28 10:17:01 +0100462static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
Ido Schimmel9de6a802015-12-15 16:03:40 +0100463 char *mac, u16 *p_fid_vid,
Jiri Pirko75c09282015-10-28 10:17:01 +0100464 u8 *p_local_port)
Jiri Pirko236033b2015-10-16 14:01:28 +0200465{
466 mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
Ido Schimmel9de6a802015-12-15 16:03:40 +0100467 *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
Jiri Pirko236033b2015-10-16 14:01:28 +0200468 *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
469}
470
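/* Usage sketch: a single static unicast FDB entry is programmed by packing
 * the SFD header with the edit op and then one unicast record at index 0.
 * SFD payloads are large, so they are heap-allocated; mac, fid, local_port
 * and err are caller-side placeholders, and mlxsw_reg_write() is assumed to
 * be the core's register-write helper.
 *
 *	char *sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
 *
 *	if (!sfd_pl)
 *		return -ENOMEM;
 *	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
 *	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
 *			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
 *			      local_port);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfd), sfd_pl);
 *	kfree(sfd_pl);
 */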
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100471/* reg_sfd_uc_lag_sub_port
472 * LAG sub port.
473 * Must be 0 if multichannel VEPA is not enabled.
474 * Access: RW
475 */
476MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
477 MLXSW_REG_SFD_REC_LEN, 0x08, false);
478
479/* reg_sfd_uc_lag_fid_vid
480 * Filtering ID or VLAN ID
481 * For SwitchX and SwitchX-2:
482 * - Dynamic entries (policy 2,3) use FID
483 * - Static entries (policy 0) use VID
484 * - When independent learning is configured, VID=FID
485 * For Spectrum: use FID for both Dynamic and Static entries.
486 * VID should not be used.
487 * Access: Index
488 */
489MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
490 MLXSW_REG_SFD_REC_LEN, 0x08, false);
491
Ido Schimmelafd7f972015-12-15 16:03:45 +0100492/* reg_sfd_uc_lag_lag_vid
493 * Indicates VID in case of vFIDs. Reserved for FIDs.
494 * Access: RW
495 */
496MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
497 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
498
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100499/* reg_sfd_uc_lag_lag_id
500 * LAG Identifier - pointer into the LAG descriptor table.
501 * Access: RW
502 */
503MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_id, MLXSW_REG_SFD_BASE_LEN, 0, 10,
504 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
505
506static inline void
507mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
508 enum mlxsw_reg_sfd_rec_policy policy,
Ido Schimmel9de6a802015-12-15 16:03:40 +0100509 const char *mac, u16 fid_vid,
Ido Schimmelafd7f972015-12-15 16:03:45 +0100510 enum mlxsw_reg_sfd_rec_action action, u16 lag_vid,
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100511 u16 lag_id)
512{
513 mlxsw_reg_sfd_rec_pack(payload, rec_index,
514 MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG,
Elad Raz5230b252016-01-10 21:06:24 +0100515 mac, action);
516 mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100517 mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
Ido Schimmel9de6a802015-12-15 16:03:40 +0100518 mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
Ido Schimmelafd7f972015-12-15 16:03:45 +0100519 mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
Jiri Pirkoe4bfbae2015-12-03 12:12:26 +0100520 mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
521}
522
523static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index,
524 char *mac, u16 *p_vid,
525 u16 *p_lag_id)
526{
527 mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
528 *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index);
529 *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index);
530}
531
Elad Raz5230b252016-01-10 21:06:24 +0100532/* reg_sfd_mc_pgi
533 *
534 * Multicast port group index - index into the port group table.
535 * Value 0x1FFF indicates the pgi should point to the MID entry.
536 * For Spectrum this value must be set to 0x1FFF
537 * Access: RW
538 */
539MLXSW_ITEM32_INDEXED(reg, sfd, mc_pgi, MLXSW_REG_SFD_BASE_LEN, 16, 13,
540 MLXSW_REG_SFD_REC_LEN, 0x08, false);
541
542/* reg_sfd_mc_fid_vid
543 *
544 * Filtering ID or VLAN ID
545 * Access: Index
546 */
547MLXSW_ITEM32_INDEXED(reg, sfd, mc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
548 MLXSW_REG_SFD_REC_LEN, 0x08, false);
549
550/* reg_sfd_mc_mid
551 *
552 * Multicast identifier - global identifier that represents the multicast
553 * group across all devices.
554 * Access: RW
555 */
556MLXSW_ITEM32_INDEXED(reg, sfd, mc_mid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
557 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
558
559static inline void
560mlxsw_reg_sfd_mc_pack(char *payload, int rec_index,
561 const char *mac, u16 fid_vid,
562 enum mlxsw_reg_sfd_rec_action action, u16 mid)
563{
564 mlxsw_reg_sfd_rec_pack(payload, rec_index,
565 MLXSW_REG_SFD_REC_TYPE_MULTICAST, mac, action);
566 mlxsw_reg_sfd_mc_pgi_set(payload, rec_index, 0x1FFF);
567 mlxsw_reg_sfd_mc_fid_vid_set(payload, rec_index, fid_vid);
568 mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid);
569}
570
Jiri Pirkof5d88f52015-10-16 14:01:29 +0200571/* SFN - Switch FDB Notification Register
572 * -------------------------------------------
573 * The switch provides notifications on newly learned FDB entries and
574 * aged out entries. The notifications can be polled by software.
575 */
576#define MLXSW_REG_SFN_ID 0x200B
577#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */
578#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */
579#define MLXSW_REG_SFN_REC_MAX_COUNT 64
580#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \
581 MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT)
582
583static const struct mlxsw_reg_info mlxsw_reg_sfn = {
584 .id = MLXSW_REG_SFN_ID,
585 .len = MLXSW_REG_SFN_LEN,
586};
587
588/* reg_sfn_swid
589 * Switch partition ID.
590 * Access: Index
591 */
592MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
593
Ido Schimmel1803e0f2016-08-24 12:00:23 +0200594/* reg_sfn_end
595 * Forces the current session to end.
596 * Access: OP
597 */
598MLXSW_ITEM32(reg, sfn, end, 0x04, 20, 1);
599
Jiri Pirkof5d88f52015-10-16 14:01:29 +0200600/* reg_sfn_num_rec
601 * Request: Number of learned notifications and aged-out notification
602 * records requested.
603 * Response: Number of notification records returned (must be smaller
604 * than or equal to the value requested)
605 * Ranges 0..64
606 * Access: OP
607 */
608MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8);
609
610static inline void mlxsw_reg_sfn_pack(char *payload)
611{
612 MLXSW_REG_ZERO(sfn, payload);
613 mlxsw_reg_sfn_swid_set(payload, 0);
Ido Schimmel1803e0f2016-08-24 12:00:23 +0200614 mlxsw_reg_sfn_end_set(payload, 1);
Jiri Pirkof5d88f52015-10-16 14:01:29 +0200615 mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
616}
617
618/* reg_sfn_rec_swid
619 * Switch partition ID.
620 * Access: RO
621 */
622MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8,
623 MLXSW_REG_SFN_REC_LEN, 0x00, false);
624
625enum mlxsw_reg_sfn_rec_type {
626 /* MAC addresses learned on a regular port. */
627 MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5,
Jiri Pirko3b715712015-12-03 12:12:27 +0100628 /* MAC addresses learned on a LAG port. */
629 MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG = 0x6,
630 /* Aged-out MAC address on a regular port. */
Jiri Pirkof5d88f52015-10-16 14:01:29 +0200631 MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
Jiri Pirko3b715712015-12-03 12:12:27 +0100632 /* Aged-out MAC address on a LAG port. */
633 MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8,
Jiri Pirkof5d88f52015-10-16 14:01:29 +0200634};
635
636/* reg_sfn_rec_type
637 * Notification record type.
638 * Access: RO
639 */
640MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
641 MLXSW_REG_SFN_REC_LEN, 0x00, false);
642
643/* reg_sfn_rec_mac
644 * MAC address.
645 * Access: RO
646 */
647MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
648 MLXSW_REG_SFN_REC_LEN, 0x02);
649
Jiri Pirko8316f082015-10-28 10:17:00 +0100650/* reg_sfn_mac_sub_port
Jiri Pirkof5d88f52015-10-16 14:01:29 +0200651 * VEPA channel on the local port.
652 * 0 if multichannel VEPA is not enabled.
653 * Access: RO
654 */
655MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
656 MLXSW_REG_SFN_REC_LEN, 0x08, false);
657
Jiri Pirko8316f082015-10-28 10:17:00 +0100658/* reg_sfn_mac_fid
Jiri Pirkof5d88f52015-10-16 14:01:29 +0200659 * Filtering identifier.
660 * Access: RO
661 */
662MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
663 MLXSW_REG_SFN_REC_LEN, 0x08, false);
664
Jiri Pirko8316f082015-10-28 10:17:00 +0100665/* reg_sfn_mac_system_port
Jiri Pirkof5d88f52015-10-16 14:01:29 +0200666 * Unique port identifier for the final destination of the packet.
667 * Access: RO
668 */
669MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
670 MLXSW_REG_SFN_REC_LEN, 0x0C, false);
671
672static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
673 char *mac, u16 *p_vid,
674 u8 *p_local_port)
675{
676 mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
677 *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
678 *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index);
679}
680
Jiri Pirko3b715712015-12-03 12:12:27 +0100681/* reg_sfn_mac_lag_lag_id
682 * LAG ID (pointer into the LAG descriptor table).
683 * Access: RO
684 */
685MLXSW_ITEM32_INDEXED(reg, sfn, mac_lag_lag_id, MLXSW_REG_SFN_BASE_LEN, 0, 10,
686 MLXSW_REG_SFN_REC_LEN, 0x0C, false);
687
688static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index,
689 char *mac, u16 *p_vid,
690 u16 *p_lag_id)
691{
692 mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
693 *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
694 *p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index);
695}
696
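/* Usage sketch: FDB notifications are polled by querying SFN and walking the
 * returned records; the record type selects which unpack helper applies.
 * mlxsw_reg_query() is assumed to be the core's register-read helper; mac,
 * fid, local_port, lag_id and err are caller-side placeholders.
 *
 *	char *sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
 *	u8 num_rec;
 *	int i;
 *
 *	mlxsw_reg_sfn_pack(sfn_pl);
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sfn), sfn_pl);
 *	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
 *	for (i = 0; i < num_rec; i++) {
 *		switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, i)) {
 *		case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
 *			mlxsw_reg_sfn_mac_unpack(sfn_pl, i, mac, &fid,
 *						 &local_port);
 *			break;
 *		case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
 *			mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, i, mac, &fid,
 *						     &lag_id);
 *			break;
 *		}
 *	}
 *	kfree(sfn_pl);
 */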
Ido Schimmel4ec14b72015-07-29 23:33:48 +0200697/* SPMS - Switch Port MSTP/RSTP State Register
698 * -------------------------------------------
699 * Configures the spanning tree state of a physical port.
700 */
Jiri Pirko3f0effd2015-10-15 17:43:23 +0200701#define MLXSW_REG_SPMS_ID 0x200D
Ido Schimmel4ec14b72015-07-29 23:33:48 +0200702#define MLXSW_REG_SPMS_LEN 0x404
703
704static const struct mlxsw_reg_info mlxsw_reg_spms = {
705 .id = MLXSW_REG_SPMS_ID,
706 .len = MLXSW_REG_SPMS_LEN,
707};
708
709/* reg_spms_local_port
710 * Local port number.
711 * Access: Index
712 */
713MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
714
715enum mlxsw_reg_spms_state {
716 MLXSW_REG_SPMS_STATE_NO_CHANGE,
717 MLXSW_REG_SPMS_STATE_DISCARDING,
718 MLXSW_REG_SPMS_STATE_LEARNING,
719 MLXSW_REG_SPMS_STATE_FORWARDING,
720};
721
722/* reg_spms_state
723 * Spanning tree state of each VLAN ID (VID) of the local port.
724 * 0 - Do not change spanning tree state (used only when writing).
725 * 1 - Discarding. No learning or forwarding to/from this port (default).
726 * 2 - Learning. Port is learning, but not forwarding.
727 * 3 - Forwarding. Port is learning and forwarding.
728 * Access: RW
729 */
730MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
731
Jiri Pirkoebb79632015-10-15 17:43:26 +0200732static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
Ido Schimmel4ec14b72015-07-29 23:33:48 +0200733{
734 MLXSW_REG_ZERO(spms, payload);
735 mlxsw_reg_spms_local_port_set(payload, local_port);
Jiri Pirkoebb79632015-10-15 17:43:26 +0200736}
737
738static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
739 enum mlxsw_reg_spms_state state)
740{
Ido Schimmel4ec14b72015-07-29 23:33:48 +0200741 mlxsw_reg_spms_state_set(payload, vid, state);
742}
743
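/* Usage sketch: the per-VID spanning tree state is written by packing the
 * port once and then one state per VID of interest, e.g. putting VLAN 1 of
 * local port 3 into forwarding. The payload is heap-allocated because of its
 * size; mlxsw_reg_write() is assumed as above.
 *
 *	char *spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
 *	int err;
 *
 *	mlxsw_reg_spms_pack(spms_pl, 3);
 *	mlxsw_reg_spms_vid_pack(spms_pl, 1, MLXSW_REG_SPMS_STATE_FORWARDING);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spms), spms_pl);
 *	kfree(spms_pl);
 */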
Elad Razb2e345f2015-10-16 14:01:30 +0200744/* SPVID - Switch Port VID
745 * -----------------------
746 * The switch port VID configures the default VID for a port.
747 */
748#define MLXSW_REG_SPVID_ID 0x200E
749#define MLXSW_REG_SPVID_LEN 0x08
750
751static const struct mlxsw_reg_info mlxsw_reg_spvid = {
752 .id = MLXSW_REG_SPVID_ID,
753 .len = MLXSW_REG_SPVID_LEN,
754};
755
756/* reg_spvid_local_port
757 * Local port number.
758 * Access: Index
759 */
760MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
761
762/* reg_spvid_sub_port
763 * Virtual port within the physical port.
764 * Should be set to 0 when virtual ports are not enabled on the port.
765 * Access: Index
766 */
767MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
768
769/* reg_spvid_pvid
770 * Port default VID
771 * Access: RW
772 */
773MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
774
775static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
776{
777 MLXSW_REG_ZERO(spvid, payload);
778 mlxsw_reg_spvid_local_port_set(payload, local_port);
779 mlxsw_reg_spvid_pvid_set(payload, pvid);
780}
781
782/* SPVM - Switch Port VLAN Membership
783 * ----------------------------------
784 * The Switch Port VLAN Membership register configures the VLAN membership
785 * of a port in a VLAN denoted by VID. VLAN membership is managed per
786 * virtual port. The register can be used to add and remove VID(s) from a port.
787 */
788#define MLXSW_REG_SPVM_ID 0x200F
789#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
790#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
791#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
792#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
793 MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
794
795static const struct mlxsw_reg_info mlxsw_reg_spvm = {
796 .id = MLXSW_REG_SPVM_ID,
797 .len = MLXSW_REG_SPVM_LEN,
798};
799
800/* reg_spvm_pt
801 * Priority tagged. If this bit is set, packets forwarded to the port with
802 * untagged VLAN membership (u bit is set) will be tagged with priority tag
803 * (VID=0)
804 * Access: RW
805 */
806MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1);
807
808/* reg_spvm_pte
809 * Priority Tagged Update Enable. On Write operations, if this bit is cleared,
810 * the pt bit will NOT be updated. To update the pt bit, pte must be set.
811 * Access: WO
812 */
813MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
814
815/* reg_spvm_local_port
816 * Local port number.
817 * Access: Index
818 */
819MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
820
821/* reg_spvm_sub_port
822 * Virtual port within the physical port.
823 * Should be set to 0 when virtual ports are not enabled on the port.
824 * Access: Index
825 */
826MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8);
827
828/* reg_spvm_num_rec
829 * Number of records to update. Each record contains: i, e, u, vid.
830 * Access: OP
831 */
832MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8);
833
834/* reg_spvm_rec_i
835 * Ingress membership in VLAN ID.
836 * Access: Index
837 */
838MLXSW_ITEM32_INDEXED(reg, spvm, rec_i,
839 MLXSW_REG_SPVM_BASE_LEN, 14, 1,
840 MLXSW_REG_SPVM_REC_LEN, 0, false);
841
842/* reg_spvm_rec_e
843 * Egress membership in VLAN ID.
844 * Access: Index
845 */
846MLXSW_ITEM32_INDEXED(reg, spvm, rec_e,
847 MLXSW_REG_SPVM_BASE_LEN, 13, 1,
848 MLXSW_REG_SPVM_REC_LEN, 0, false);
849
850/* reg_spvm_rec_u
851 * Untagged - port is an untagged member - egress transmission uses untagged
852 * frames on VID<n>
853 * Access: Index
854 */
855MLXSW_ITEM32_INDEXED(reg, spvm, rec_u,
856 MLXSW_REG_SPVM_BASE_LEN, 12, 1,
857 MLXSW_REG_SPVM_REC_LEN, 0, false);
858
859/* reg_spvm_rec_vid
 860 * VLAN ID to be added/removed from the port or for querying.
861 * Access: Index
862 */
863MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
864 MLXSW_REG_SPVM_BASE_LEN, 0, 12,
865 MLXSW_REG_SPVM_REC_LEN, 0, false);
866
867static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
868 u16 vid_begin, u16 vid_end,
869 bool is_member, bool untagged)
870{
871 int size = vid_end - vid_begin + 1;
872 int i;
873
874 MLXSW_REG_ZERO(spvm, payload);
875 mlxsw_reg_spvm_local_port_set(payload, local_port);
876 mlxsw_reg_spvm_num_rec_set(payload, size);
877
878 for (i = 0; i < size; i++) {
879 mlxsw_reg_spvm_rec_i_set(payload, i, is_member);
880 mlxsw_reg_spvm_rec_e_set(payload, i, is_member);
881 mlxsw_reg_spvm_rec_u_set(payload, i, untagged);
882 mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i);
883 }
884}
885
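/* Usage sketch: making local port 3 an untagged member of VLANs 10..20 in one
 * transaction (the pack helper generates one record per VID). The payload is
 * heap-allocated because of its size; mlxsw_reg_write() is assumed as above.
 *
 *	char *spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
 *	int err;
 *
 *	mlxsw_reg_spvm_pack(spvm_pl, 3, 10, 20, true, true);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spvm), spvm_pl);
 *	kfree(spvm_pl);
 */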
Ido Schimmel148f4722016-02-18 11:30:01 +0100886/* SPAFT - Switch Port Acceptable Frame Types
887 * ------------------------------------------
888 * The Switch Port Acceptable Frame Types register configures the frame
889 * admittance of the port.
890 */
891#define MLXSW_REG_SPAFT_ID 0x2010
892#define MLXSW_REG_SPAFT_LEN 0x08
893
894static const struct mlxsw_reg_info mlxsw_reg_spaft = {
895 .id = MLXSW_REG_SPAFT_ID,
896 .len = MLXSW_REG_SPAFT_LEN,
897};
898
899/* reg_spaft_local_port
900 * Local port number.
901 * Access: Index
902 *
903 * Note: CPU port is not supported (all tag types are allowed).
904 */
905MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
906
907/* reg_spaft_sub_port
908 * Virtual port within the physical port.
909 * Should be set to 0 when virtual ports are not enabled on the port.
910 * Access: RW
911 */
912MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
913
914/* reg_spaft_allow_untagged
915 * When set, untagged frames on the ingress are allowed (default).
916 * Access: RW
917 */
918MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
919
920/* reg_spaft_allow_prio_tagged
921 * When set, priority tagged frames on the ingress are allowed (default).
922 * Access: RW
923 */
924MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
925
926/* reg_spaft_allow_tagged
927 * When set, tagged frames on the ingress are allowed (default).
928 * Access: RW
929 */
930MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
931
932static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
933 bool allow_untagged)
934{
935 MLXSW_REG_ZERO(spaft, payload);
936 mlxsw_reg_spaft_local_port_set(payload, local_port);
937 mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
938 mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
939 mlxsw_reg_spaft_allow_tagged_set(payload, true);
940}
941
Ido Schimmel4ec14b72015-07-29 23:33:48 +0200942/* SFGC - Switch Flooding Group Configuration
943 * ------------------------------------------
944 * The following register controls the association of flooding tables and MIDs
945 * to packet types used for flooding.
946 */
Jiri Pirko36b78e82015-10-15 17:43:24 +0200947#define MLXSW_REG_SFGC_ID 0x2011
Ido Schimmel4ec14b72015-07-29 23:33:48 +0200948#define MLXSW_REG_SFGC_LEN 0x10
949
950static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
951 .id = MLXSW_REG_SFGC_ID,
952 .len = MLXSW_REG_SFGC_LEN,
953};
954
955enum mlxsw_reg_sfgc_type {
Ido Schimmelfa6ad052015-10-15 17:43:25 +0200956 MLXSW_REG_SFGC_TYPE_BROADCAST,
957 MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
958 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
959 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
960 MLXSW_REG_SFGC_TYPE_RESERVED,
961 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
962 MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL,
963 MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST,
964 MLXSW_REG_SFGC_TYPE_MAX,
Ido Schimmel4ec14b72015-07-29 23:33:48 +0200965};
966
967/* reg_sfgc_type
968 * The traffic type to reach the flooding table.
969 * Access: Index
970 */
971MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
972
973enum mlxsw_reg_sfgc_bridge_type {
974 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
975 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
976};
977
978/* reg_sfgc_bridge_type
979 * Access: Index
980 *
981 * Note: SwitchX-2 only supports 802.1Q mode.
982 */
983MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
984
985enum mlxsw_flood_table_type {
986 MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
987 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
988 MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
989 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
990 MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
991};
992
993/* reg_sfgc_table_type
994 * See mlxsw_flood_table_type
995 * Access: RW
996 *
997 * Note: FID offset and FID types are not supported in SwitchX-2.
998 */
999MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
1000
1001/* reg_sfgc_flood_table
1002 * Flooding table index to associate with the specific type on the specific
1003 * switch partition.
1004 * Access: RW
1005 */
1006MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
1007
1008/* reg_sfgc_mid
1009 * The multicast ID for the swid. Not supported for Spectrum
1010 * Access: RW
1011 */
1012MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
1013
1014/* reg_sfgc_counter_set_type
1015 * Counter Set Type for flow counters.
1016 * Access: RW
1017 */
1018MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
1019
1020/* reg_sfgc_counter_index
1021 * Counter Index for flow counters.
1022 * Access: RW
1023 */
1024MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
1025
1026static inline void
1027mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
1028 enum mlxsw_reg_sfgc_bridge_type bridge_type,
1029 enum mlxsw_flood_table_type table_type,
1030 unsigned int flood_table)
1031{
1032 MLXSW_REG_ZERO(sfgc, payload);
1033 mlxsw_reg_sfgc_type_set(payload, type);
1034 mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
1035 mlxsw_reg_sfgc_table_type_set(payload, table_type);
1036 mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
1037 mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
1038}
1039
1040/* SFTR - Switch Flooding Table Register
1041 * -------------------------------------
1042 * The switch flooding table is used for flooding packet replication. The table
1043 * defines a bit mask of ports for packet replication.
1044 */
1045#define MLXSW_REG_SFTR_ID 0x2012
1046#define MLXSW_REG_SFTR_LEN 0x420
1047
1048static const struct mlxsw_reg_info mlxsw_reg_sftr = {
1049 .id = MLXSW_REG_SFTR_ID,
1050 .len = MLXSW_REG_SFTR_LEN,
1051};
1052
1053/* reg_sftr_swid
1054 * Switch partition ID with which to associate the port.
1055 * Access: Index
1056 */
1057MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
1058
1059/* reg_sftr_flood_table
1060 * Flooding table index to associate with the specific type on the specific
1061 * switch partition.
1062 * Access: Index
1063 */
1064MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
1065
1066/* reg_sftr_index
1067 * Index. Used as an index into the Flooding Table in case the table is
1068 * configured to use VID / FID or FID Offset.
1069 * Access: Index
1070 */
1071MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
1072
1073/* reg_sftr_table_type
1074 * See mlxsw_flood_table_type
1075 * Access: RW
1076 */
1077MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
1078
1079/* reg_sftr_range
1080 * Range of entries to update
1081 * Access: Index
1082 */
1083MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
1084
1085/* reg_sftr_port
1086 * Local port membership (1 bit per port).
1087 * Access: RW
1088 */
1089MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
1090
1091/* reg_sftr_cpu_port_mask
1092 * CPU port mask (1 bit per port).
1093 * Access: W
1094 */
1095MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
1096
1097static inline void mlxsw_reg_sftr_pack(char *payload,
1098 unsigned int flood_table,
1099 unsigned int index,
1100 enum mlxsw_flood_table_type table_type,
Ido Schimmelbc2055f2015-10-16 14:01:23 +02001101 unsigned int range, u8 port, bool set)
Ido Schimmel4ec14b72015-07-29 23:33:48 +02001102{
1103 MLXSW_REG_ZERO(sftr, payload);
1104 mlxsw_reg_sftr_swid_set(payload, 0);
1105 mlxsw_reg_sftr_flood_table_set(payload, flood_table);
1106 mlxsw_reg_sftr_index_set(payload, index);
1107 mlxsw_reg_sftr_table_type_set(payload, table_type);
1108 mlxsw_reg_sftr_range_set(payload, range);
Ido Schimmelbc2055f2015-10-16 14:01:23 +02001109 mlxsw_reg_sftr_port_set(payload, port, set);
1110 mlxsw_reg_sftr_port_mask_set(payload, port, 1);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02001111}
1112
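/* Usage sketch: adding a local port to flooding table 0, at an entry indexed
 * by FID offset, for a range of one entry. The table type, index, port and
 * err values are illustrative placeholders; mlxsw_reg_write() is assumed as
 * above.
 *
 *	char *sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
 *
 *	mlxsw_reg_sftr_pack(sftr_pl, 0, fid_offset,
 *			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 1,
 *			    local_port, true);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sftr), sftr_pl);
 *	kfree(sftr_pl);
 */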
Ido Schimmel41933272016-01-27 15:20:17 +01001113/* SFDF - Switch Filtering DB Flush
1114 * --------------------------------
1115 * The switch filtering DB flush register is used to flush the FDB.
1116 * Note that FDB notifications are flushed as well.
1117 */
1118#define MLXSW_REG_SFDF_ID 0x2013
1119#define MLXSW_REG_SFDF_LEN 0x14
1120
1121static const struct mlxsw_reg_info mlxsw_reg_sfdf = {
1122 .id = MLXSW_REG_SFDF_ID,
1123 .len = MLXSW_REG_SFDF_LEN,
1124};
1125
1126/* reg_sfdf_swid
1127 * Switch partition ID.
1128 * Access: Index
1129 */
1130MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8);
1131
1132enum mlxsw_reg_sfdf_flush_type {
1133 MLXSW_REG_SFDF_FLUSH_PER_SWID,
1134 MLXSW_REG_SFDF_FLUSH_PER_FID,
1135 MLXSW_REG_SFDF_FLUSH_PER_PORT,
1136 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
1137 MLXSW_REG_SFDF_FLUSH_PER_LAG,
1138 MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
1139};
1140
1141/* reg_sfdf_flush_type
1142 * Flush type.
1143 * 0 - All SWID dynamic entries are flushed.
1144 * 1 - All FID dynamic entries are flushed.
1145 * 2 - All dynamic entries pointing to port are flushed.
1146 * 3 - All FID dynamic entries pointing to port are flushed.
1147 * 4 - All dynamic entries pointing to LAG are flushed.
1148 * 5 - All FID dynamic entries pointing to LAG are flushed.
1149 * Access: RW
1150 */
1151MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
1152
1153/* reg_sfdf_flush_static
1154 * Static.
1155 * 0 - Flush only dynamic entries.
1156 * 1 - Flush both dynamic and static entries.
1157 * Access: RW
1158 */
1159MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1);
1160
1161static inline void mlxsw_reg_sfdf_pack(char *payload,
1162 enum mlxsw_reg_sfdf_flush_type type)
1163{
1164 MLXSW_REG_ZERO(sfdf, payload);
1165 mlxsw_reg_sfdf_flush_type_set(payload, type);
1166 mlxsw_reg_sfdf_flush_static_set(payload, true);
1167}
1168
1169/* reg_sfdf_fid
1170 * FID to flush.
1171 * Access: RW
1172 */
1173MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16);
1174
1175/* reg_sfdf_system_port
1176 * Port to flush.
1177 * Access: RW
1178 */
1179MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16);
1180
1181/* reg_sfdf_port_fid_system_port
1182 * Port to flush, pointed to by FID.
1183 * Access: RW
1184 */
1185MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16);
1186
1187/* reg_sfdf_lag_id
1188 * LAG ID to flush.
1189 * Access: RW
1190 */
1191MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10);
1192
1193/* reg_sfdf_lag_fid_lag_id
1194 * LAG ID to flush, pointed to by FID.
1195 * Access: RW
1196 */
1197MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
1198
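/* Usage sketch: flushing the FDB entries that point to a given {port, FID}
 * pair combines the flush type with the two fields above; fid, local_port and
 * err are caller-side placeholders, and mlxsw_reg_write() is assumed as
 * above.
 *
 *	char sfdf_pl[MLXSW_REG_SFDF_LEN];
 *
 *	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
 *	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
 *	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, local_port);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfdf), sfdf_pl);
 */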
Jiri Pirkod1d40be2015-12-03 12:12:25 +01001199/* SLDR - Switch LAG Descriptor Register
1200 * -----------------------------------------
1201 * The switch LAG descriptor register is populated by LAG descriptors.
1202 * Each LAG descriptor is indexed by lag_id. The LAG ID runs from 0 to
1203 * max_lag-1.
1204 */
1205#define MLXSW_REG_SLDR_ID 0x2014
1206#define MLXSW_REG_SLDR_LEN 0x0C /* counting in only one port in list */
1207
1208static const struct mlxsw_reg_info mlxsw_reg_sldr = {
1209 .id = MLXSW_REG_SLDR_ID,
1210 .len = MLXSW_REG_SLDR_LEN,
1211};
1212
1213enum mlxsw_reg_sldr_op {
1214 /* Indicates a creation of a new LAG-ID, lag_id must be valid */
1215 MLXSW_REG_SLDR_OP_LAG_CREATE,
1216 MLXSW_REG_SLDR_OP_LAG_DESTROY,
1217 /* Ports that appear in the list have the Distributor enabled */
1218 MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST,
 1219 /* Removes ports from the distributor list */
1220 MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST,
1221};
1222
1223/* reg_sldr_op
1224 * Operation.
1225 * Access: RW
1226 */
1227MLXSW_ITEM32(reg, sldr, op, 0x00, 29, 3);
1228
1229/* reg_sldr_lag_id
1230 * LAG identifier. The lag_id is the index into the LAG descriptor table.
1231 * Access: Index
1232 */
1233MLXSW_ITEM32(reg, sldr, lag_id, 0x00, 0, 10);
1234
1235static inline void mlxsw_reg_sldr_lag_create_pack(char *payload, u8 lag_id)
1236{
1237 MLXSW_REG_ZERO(sldr, payload);
1238 mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_CREATE);
1239 mlxsw_reg_sldr_lag_id_set(payload, lag_id);
1240}
1241
1242static inline void mlxsw_reg_sldr_lag_destroy_pack(char *payload, u8 lag_id)
1243{
1244 MLXSW_REG_ZERO(sldr, payload);
1245 mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_DESTROY);
1246 mlxsw_reg_sldr_lag_id_set(payload, lag_id);
1247}
1248
1249/* reg_sldr_num_ports
1250 * The number of member ports of the LAG.
1251 * Reserved for Create / Destroy operations
1252 * For Add / Remove operations - indicates the number of ports in the list.
1253 * Access: RW
1254 */
1255MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8);
1256
1257/* reg_sldr_system_port
1258 * System port.
1259 * Access: RW
1260 */
1261MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false);
1262
1263static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id,
1264 u8 local_port)
1265{
1266 MLXSW_REG_ZERO(sldr, payload);
1267 mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST);
1268 mlxsw_reg_sldr_lag_id_set(payload, lag_id);
1269 mlxsw_reg_sldr_num_ports_set(payload, 1);
1270 mlxsw_reg_sldr_system_port_set(payload, 0, local_port);
1271}
1272
1273static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id,
1274 u8 local_port)
1275{
1276 MLXSW_REG_ZERO(sldr, payload);
1277 mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST);
1278 mlxsw_reg_sldr_lag_id_set(payload, lag_id);
1279 mlxsw_reg_sldr_num_ports_set(payload, 1);
1280 mlxsw_reg_sldr_system_port_set(payload, 0, local_port);
1281}
1282
1283/* SLCR - Switch LAG Configuration 2 Register
1284 * -------------------------------------------
1285 * The Switch LAG Configuration register is used for configuring the
1286 * LAG properties of the switch.
1287 */
1288#define MLXSW_REG_SLCR_ID 0x2015
1289#define MLXSW_REG_SLCR_LEN 0x10
1290
1291static const struct mlxsw_reg_info mlxsw_reg_slcr = {
1292 .id = MLXSW_REG_SLCR_ID,
1293 .len = MLXSW_REG_SLCR_LEN,
1294};
1295
1296enum mlxsw_reg_slcr_pp {
1297 /* Global Configuration (for all ports) */
1298 MLXSW_REG_SLCR_PP_GLOBAL,
1299 /* Per port configuration, based on local_port field */
1300 MLXSW_REG_SLCR_PP_PER_PORT,
1301};
1302
1303/* reg_slcr_pp
1304 * Per Port Configuration
1305 * Note: Reading at Global mode results in reading port 1 configuration.
1306 * Access: Index
1307 */
1308MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1);
1309
1310/* reg_slcr_local_port
1311 * Local port number
1312 * Supported from CPU port
1313 * Not supported from router port
1314 * Reserved when pp = Global Configuration
1315 * Access: Index
1316 */
1317MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8);
1318
1319enum mlxsw_reg_slcr_type {
1320 MLXSW_REG_SLCR_TYPE_CRC, /* default */
1321 MLXSW_REG_SLCR_TYPE_XOR,
1322 MLXSW_REG_SLCR_TYPE_RANDOM,
1323};
1324
1325/* reg_slcr_type
1326 * Hash type
1327 * Access: RW
1328 */
1329MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4);
1330
1331/* Ingress port */
1332#define MLXSW_REG_SLCR_LAG_HASH_IN_PORT BIT(0)
1333/* SMAC - for IPv4 and IPv6 packets */
1334#define MLXSW_REG_SLCR_LAG_HASH_SMAC_IP BIT(1)
1335/* SMAC - for non-IP packets */
1336#define MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP BIT(2)
1337#define MLXSW_REG_SLCR_LAG_HASH_SMAC \
1338 (MLXSW_REG_SLCR_LAG_HASH_SMAC_IP | \
1339 MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP)
1340/* DMAC - for IPv4 and IPv6 packets */
1341#define MLXSW_REG_SLCR_LAG_HASH_DMAC_IP BIT(3)
1342/* DMAC - for non-IP packets */
1343#define MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP BIT(4)
1344#define MLXSW_REG_SLCR_LAG_HASH_DMAC \
1345 (MLXSW_REG_SLCR_LAG_HASH_DMAC_IP | \
1346 MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP)
1347/* Ethertype - for IPv4 and IPv6 packets */
1348#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP BIT(5)
1349/* Ethertype - for non-IP packets */
1350#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP BIT(6)
1351#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE \
1352 (MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP | \
1353 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP)
1354/* VLAN ID - for IPv4 and IPv6 packets */
1355#define MLXSW_REG_SLCR_LAG_HASH_VLANID_IP BIT(7)
1356/* VLAN ID - for non-IP packets */
1357#define MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP BIT(8)
1358#define MLXSW_REG_SLCR_LAG_HASH_VLANID \
1359 (MLXSW_REG_SLCR_LAG_HASH_VLANID_IP | \
1360 MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP)
1361/* Source IP address (can be IPv4 or IPv6) */
1362#define MLXSW_REG_SLCR_LAG_HASH_SIP BIT(9)
1363/* Destination IP address (can be IPv4 or IPv6) */
1364#define MLXSW_REG_SLCR_LAG_HASH_DIP BIT(10)
1365/* TCP/UDP source port */
1366#define MLXSW_REG_SLCR_LAG_HASH_SPORT BIT(11)
1367/* TCP/UDP destination port*/
1368#define MLXSW_REG_SLCR_LAG_HASH_DPORT BIT(12)
1369/* IPv4 Protocol/IPv6 Next Header */
1370#define MLXSW_REG_SLCR_LAG_HASH_IPPROTO BIT(13)
1371/* IPv6 Flow label */
1372#define MLXSW_REG_SLCR_LAG_HASH_FLOWLABEL BIT(14)
1373/* SID - FCoE source ID */
1374#define MLXSW_REG_SLCR_LAG_HASH_FCOE_SID BIT(15)
1375/* DID - FCoE destination ID */
1376#define MLXSW_REG_SLCR_LAG_HASH_FCOE_DID BIT(16)
1377/* OXID - FCoE originator exchange ID */
1378#define MLXSW_REG_SLCR_LAG_HASH_FCOE_OXID BIT(17)
1379/* Destination QP number - for RoCE packets */
1380#define MLXSW_REG_SLCR_LAG_HASH_ROCE_DQP BIT(19)
1381
1382/* reg_slcr_lag_hash
1383 * LAG hashing configuration. This is a bitmask, in which each set
1384 * bit includes the corresponding item in the LAG hash calculation.
1385 * The default lag_hash contains SMAC, DMAC, VLANID and
1386 * Ethertype (for all packet types).
1387 * Access: RW
1388 */
1389MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20);
1390
1391static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
1392{
1393 MLXSW_REG_ZERO(slcr, payload);
1394 mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
1395 mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR);
1396 mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
1397}
1398
1399/* SLCOR - Switch LAG Collector Register
1400 * -------------------------------------
1401 * The Switch LAG Collector register controls the Local Port membership
1402 * in a LAG and enablement of the collector.
1403 */
1404#define MLXSW_REG_SLCOR_ID 0x2016
1405#define MLXSW_REG_SLCOR_LEN 0x10
1406
1407static const struct mlxsw_reg_info mlxsw_reg_slcor = {
1408 .id = MLXSW_REG_SLCOR_ID,
1409 .len = MLXSW_REG_SLCOR_LEN,
1410};
1411
1412enum mlxsw_reg_slcor_col {
1413 /* Port is added with collector disabled */
1414 MLXSW_REG_SLCOR_COL_LAG_ADD_PORT,
1415 MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED,
1416 MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED,
1417 MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT,
1418};
1419
1420/* reg_slcor_col
1421 * Collector configuration
1422 * Access: RW
1423 */
1424MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2);
1425
1426/* reg_slcor_local_port
1427 * Local port number
1428 * Not supported for CPU port
1429 * Access: Index
1430 */
1431MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8);
1432
1433/* reg_slcor_lag_id
1434 * LAG Identifier. Index into the LAG descriptor table.
1435 * Access: Index
1436 */
1437MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10);
1438
1439/* reg_slcor_port_index
1440 * Port index in the LAG list. Only valid on Add Port to LAG col.
1441 * Valid range is from 0 to cap_max_lag_members-1
1442 * Access: RW
1443 */
1444MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10);
1445
1446static inline void mlxsw_reg_slcor_pack(char *payload,
1447 u8 local_port, u16 lag_id,
1448 enum mlxsw_reg_slcor_col col)
1449{
1450 MLXSW_REG_ZERO(slcor, payload);
1451 mlxsw_reg_slcor_col_set(payload, col);
1452 mlxsw_reg_slcor_local_port_set(payload, local_port);
1453 mlxsw_reg_slcor_lag_id_set(payload, lag_id);
1454}
1455
1456static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
1457 u8 local_port, u16 lag_id,
1458 u8 port_index)
1459{
1460 mlxsw_reg_slcor_pack(payload, local_port, lag_id,
1461 MLXSW_REG_SLCOR_COL_LAG_ADD_PORT);
1462 mlxsw_reg_slcor_port_index_set(payload, port_index);
1463}
1464
1465static inline void mlxsw_reg_slcor_port_remove_pack(char *payload,
1466 u8 local_port, u16 lag_id)
1467{
1468 mlxsw_reg_slcor_pack(payload, local_port, lag_id,
1469 MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT);
1470}
1471
1472static inline void mlxsw_reg_slcor_col_enable_pack(char *payload,
1473 u8 local_port, u16 lag_id)
1474{
1475 mlxsw_reg_slcor_pack(payload, local_port, lag_id,
1476 MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
1477}
1478
1479static inline void mlxsw_reg_slcor_col_disable_pack(char *payload,
1480 u8 local_port, u16 lag_id)
1481{
1482 mlxsw_reg_slcor_pack(payload, local_port, lag_id,
 1483 MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED);
1484}
1485
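/* Usage sketch: one plausible LAG bring-up order with the three registers
 * above is hash configuration (SLCR), descriptor creation and port addition
 * (SLDR), then collector enablement (SLCOR). The lag_id, local_port and err
 * values are placeholders, the chosen hash fields are illustrative only, and
 * mlxsw_reg_write() is assumed as above.
 *
 *	char slcr_pl[MLXSW_REG_SLCR_LEN];
 *	char sldr_pl[MLXSW_REG_SLDR_LEN];
 *	char slcor_pl[MLXSW_REG_SLCOR_LEN];
 *
 *	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
 *				     MLXSW_REG_SLCR_LAG_HASH_DMAC);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(slcr), slcr_pl);
 *
 *	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sldr), sldr_pl);
 *	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, local_port);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sldr), sldr_pl);
 *
 *	mlxsw_reg_slcor_port_add_pack(slcor_pl, local_port, lag_id, 0);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(slcor), slcor_pl);
 *	mlxsw_reg_slcor_col_enable_pack(slcor_pl, local_port, lag_id);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(slcor), slcor_pl);
 */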
Ido Schimmel4ec14b72015-07-29 23:33:48 +02001486/* SPMLR - Switch Port MAC Learning Register
1487 * -----------------------------------------
1488 * Controls the Switch MAC learning policy per port.
1489 */
1490#define MLXSW_REG_SPMLR_ID 0x2018
1491#define MLXSW_REG_SPMLR_LEN 0x8
1492
1493static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
1494 .id = MLXSW_REG_SPMLR_ID,
1495 .len = MLXSW_REG_SPMLR_LEN,
1496};
1497
1498/* reg_spmlr_local_port
1499 * Local port number.
1500 * Access: Index
1501 */
1502MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
1503
1504/* reg_spmlr_sub_port
1505 * Virtual port within the physical port.
1506 * Should be set to 0 when virtual ports are not enabled on the port.
1507 * Access: Index
1508 */
1509MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
1510
1511enum mlxsw_reg_spmlr_learn_mode {
1512 MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
1513 MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
1514 MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
1515};
1516
1517/* reg_spmlr_learn_mode
1518 * Learning mode on the port.
1519 * 0 - Learning disabled.
1520 * 2 - Learning enabled.
1521 * 3 - Security mode.
1522 *
1523 * In security mode the switch does not learn MACs on the port, but uses the
1524 * SMAC to see if it exists on another ingress port. If so, the packet is
1525 * classified as a bad packet and is discarded unless the software registers
 1526 * to receive port security error packets using HPKT.
1527 */
1528MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
1529
1530static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
1531 enum mlxsw_reg_spmlr_learn_mode mode)
1532{
1533 MLXSW_REG_ZERO(spmlr, payload);
1534 mlxsw_reg_spmlr_local_port_set(payload, local_port);
1535 mlxsw_reg_spmlr_sub_port_set(payload, 0);
1536 mlxsw_reg_spmlr_learn_mode_set(payload, mode);
1537}
1538
Ido Schimmel64790232015-10-16 14:01:33 +02001539/* SVFA - Switch VID to FID Allocation Register
1540 * --------------------------------------------
1541 * Controls the VID to FID mapping and {Port, VID} to FID mapping for
1542 * virtualized ports.
1543 */
1544#define MLXSW_REG_SVFA_ID 0x201C
1545#define MLXSW_REG_SVFA_LEN 0x10
1546
1547static const struct mlxsw_reg_info mlxsw_reg_svfa = {
1548 .id = MLXSW_REG_SVFA_ID,
1549 .len = MLXSW_REG_SVFA_LEN,
1550};
1551
1552/* reg_svfa_swid
1553 * Switch partition ID.
1554 * Access: Index
1555 */
1556MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
1557
1558/* reg_svfa_local_port
1559 * Local port number.
1560 * Access: Index
1561 *
1562 * Note: Reserved for 802.1Q FIDs.
1563 */
1564MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
1565
1566enum mlxsw_reg_svfa_mt {
1567 MLXSW_REG_SVFA_MT_VID_TO_FID,
1568 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
1569};
1570
1571/* reg_svfa_mapping_table
1572 * Mapping table:
1573 * 0 - VID to FID
1574 * 1 - {Port, VID} to FID
1575 * Access: Index
1576 *
1577 * Note: Reserved for SwitchX-2.
1578 */
1579MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3);
1580
1581/* reg_svfa_v
1582 * Valid.
1583 * Valid if set.
1584 * Access: RW
1585 *
1586 * Note: Reserved for SwitchX-2.
1587 */
1588MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1);
1589
1590/* reg_svfa_fid
1591 * Filtering ID.
1592 * Access: RW
1593 */
1594MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16);
1595
1596/* reg_svfa_vid
1597 * VLAN ID.
1598 * Access: Index
1599 */
1600MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12);
1601
1602/* reg_svfa_counter_set_type
1603 * Counter set type for flow counters.
1604 * Access: RW
1605 *
1606 * Note: Reserved for SwitchX-2.
1607 */
1608MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
1609
1610/* reg_svfa_counter_index
1611 * Counter index for flow counters.
1612 * Access: RW
1613 *
1614 * Note: Reserved for SwitchX-2.
1615 */
1616MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
1617
1618static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
1619 enum mlxsw_reg_svfa_mt mt, bool valid,
1620 u16 fid, u16 vid)
1621{
1622 MLXSW_REG_ZERO(svfa, payload);
1623 local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
1624 mlxsw_reg_svfa_swid_set(payload, 0);
1625 mlxsw_reg_svfa_local_port_set(payload, local_port);
1626 mlxsw_reg_svfa_mapping_table_set(payload, mt);
1627 mlxsw_reg_svfa_v_set(payload, valid);
1628 mlxsw_reg_svfa_fid_set(payload, fid);
1629 mlxsw_reg_svfa_vid_set(payload, vid);
1630}
1631
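/* Usage sketch (illustrative only): mapping {Port, VID} to a FID on a
 * virtualized port. The helper name is hypothetical; struct mlxsw_core and
 * mlxsw_reg_write() are assumed from core.h, everything else is defined
 * above.
 */
static inline int mlxsw_reg_svfa_port_vid_map_example(struct mlxsw_core *mlxsw_core,
						       u8 local_port, u16 vid,
						       u16 fid, bool valid)
{
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	/* For the VID to FID table the pack helper clears local_port itself,
	 * so the same pattern works for both mapping table types.
	 */
	mlxsw_reg_svfa_pack(svfa_pl, local_port,
			    MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, valid, fid, vid);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(svfa), svfa_pl);
}
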
Ido Schimmel1f65da72015-10-16 14:01:34 +02001632/* SVPE - Switch Virtual-Port Enabling Register
1633 * --------------------------------------------
1634 * Enables port virtualization.
1635 */
1636#define MLXSW_REG_SVPE_ID 0x201E
1637#define MLXSW_REG_SVPE_LEN 0x4
1638
1639static const struct mlxsw_reg_info mlxsw_reg_svpe = {
1640 .id = MLXSW_REG_SVPE_ID,
1641 .len = MLXSW_REG_SVPE_LEN,
1642};
1643
1644/* reg_svpe_local_port
1645 * Local port number
1646 * Access: Index
1647 *
1648 * Note: CPU port is not supported (uses VLAN mode only).
1649 */
1650MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
1651
1652/* reg_svpe_vp_en
1653 * Virtual port enable.
1654 * 0 - Disable, VLAN mode (VID to FID).
1655 * 1 - Enable, Virtual port mode ({Port, VID} to FID).
1656 * Access: RW
1657 */
1658MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
1659
1660static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
1661 bool enable)
1662{
1663 MLXSW_REG_ZERO(svpe, payload);
1664 mlxsw_reg_svpe_local_port_set(payload, local_port);
1665 mlxsw_reg_svpe_vp_en_set(payload, enable);
1666}
1667
Ido Schimmelf1fb6932015-10-16 14:01:32 +02001668/* SFMR - Switch FID Management Register
1669 * -------------------------------------
1670 * Creates and configures FIDs.
1671 */
1672#define MLXSW_REG_SFMR_ID 0x201F
1673#define MLXSW_REG_SFMR_LEN 0x18
1674
1675static const struct mlxsw_reg_info mlxsw_reg_sfmr = {
1676 .id = MLXSW_REG_SFMR_ID,
1677 .len = MLXSW_REG_SFMR_LEN,
1678};
1679
1680enum mlxsw_reg_sfmr_op {
1681 MLXSW_REG_SFMR_OP_CREATE_FID,
1682 MLXSW_REG_SFMR_OP_DESTROY_FID,
1683};
1684
1685/* reg_sfmr_op
1686 * Operation.
1687 * 0 - Create or edit FID.
1688 * 1 - Destroy FID.
1689 * Access: WO
1690 */
1691MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
1692
1693/* reg_sfmr_fid
1694 * Filtering ID.
1695 * Access: Index
1696 */
1697MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
1698
1699/* reg_sfmr_fid_offset
1700 * FID offset.
1701 * Used to point into the flooding table selected by SFGC register if
1702 * the table is of type FID-Offset. Otherwise, this field is reserved.
1703 * Access: RW
1704 */
1705MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
1706
1707/* reg_sfmr_vtfp
1708 * Valid Tunnel Flood Pointer.
1709 * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL.
1710 * Access: RW
1711 *
1712 * Note: Reserved for 802.1Q FIDs.
1713 */
1714MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1);
1715
1716/* reg_sfmr_nve_tunnel_flood_ptr
1717 * Underlay Flooding and BC Pointer.
1718 * Used as a pointer to the first entry of the group based link lists of
1719 * flooding or BC entries (for NVE tunnels).
1720 * Access: RW
1721 */
1722MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24);
1723
1724/* reg_sfmr_vv
1725 * VNI Valid.
1726 * If not set, then vni is reserved.
1727 * Access: RW
1728 *
1729 * Note: Reserved for 802.1Q FIDs.
1730 */
1731MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
1732
1733/* reg_sfmr_vni
1734 * Virtual Network Identifier.
1735 * Access: RW
1736 *
1737 * Note: A given VNI can only be assigned to one FID.
1738 */
1739MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
1740
1741static inline void mlxsw_reg_sfmr_pack(char *payload,
1742 enum mlxsw_reg_sfmr_op op, u16 fid,
1743 u16 fid_offset)
1744{
1745 MLXSW_REG_ZERO(sfmr, payload);
1746 mlxsw_reg_sfmr_op_set(payload, op);
1747 mlxsw_reg_sfmr_fid_set(payload, fid);
1748 mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
1749 mlxsw_reg_sfmr_vtfp_set(payload, false);
1750 mlxsw_reg_sfmr_vv_set(payload, false);
1751}
1752
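/* Usage sketch (illustrative only): creating a FID whose flooding entries
 * are found at fid_offset in a FID-offset flooding table. The helper name
 * is hypothetical; mlxsw_reg_write() and struct mlxsw_core are assumed
 * from core.h.
 */
static inline int mlxsw_reg_sfmr_fid_create_example(struct mlxsw_core *mlxsw_core,
						     u16 fid, u16 fid_offset)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid,
			    fid_offset);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfmr), sfmr_pl);
}
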
Ido Schimmela4feea72015-10-16 14:01:36 +02001753/* SPVMLR - Switch Port VLAN MAC Learning Register
1754 * -----------------------------------------------
1755 * Controls the switch MAC learning policy per {Port, VID}.
1756 */
1757#define MLXSW_REG_SPVMLR_ID 0x2020
1758#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
1759#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
1760#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
1761#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
1762 MLXSW_REG_SPVMLR_REC_LEN * \
1763 MLXSW_REG_SPVMLR_REC_MAX_COUNT)
1764
1765static const struct mlxsw_reg_info mlxsw_reg_spvmlr = {
1766 .id = MLXSW_REG_SPVMLR_ID,
1767 .len = MLXSW_REG_SPVMLR_LEN,
1768};
1769
1770/* reg_spvmlr_local_port
1771 * Local ingress port.
1772 * Access: Index
1773 *
1774 * Note: CPU port is not supported.
1775 */
1776MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
1777
1778/* reg_spvmlr_num_rec
1779 * Number of records to update.
1780 * Access: OP
1781 */
1782MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8);
1783
1784/* reg_spvmlr_rec_learn_enable
1785 * 0 - Disable learning for {Port, VID}.
1786 * 1 - Enable learning for {Port, VID}.
1787 * Access: RW
1788 */
1789MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
1790 31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
1791
1792/* reg_spvmlr_rec_vid
1793 * VLAN ID to be added/removed from port or for querying.
1794 * Access: Index
1795 */
1796MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
1797 MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
1798
1799static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
1800 u16 vid_begin, u16 vid_end,
1801 bool learn_enable)
1802{
1803 int num_rec = vid_end - vid_begin + 1;
1804 int i;
1805
1806 WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT);
1807
1808 MLXSW_REG_ZERO(spvmlr, payload);
1809 mlxsw_reg_spvmlr_local_port_set(payload, local_port);
1810 mlxsw_reg_spvmlr_num_rec_set(payload, num_rec);
1811
1812 for (i = 0; i < num_rec; i++) {
1813 mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable);
1814 mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i);
1815 }
1816}
1817
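/* Usage sketch (illustrative only): enabling MAC learning for a contiguous
 * VID range on a port; the pack helper above fills one record per VID in
 * [vid_begin, vid_end]. The helper name is hypothetical; mlxsw_reg_write()
 * and struct mlxsw_core are assumed from core.h.
 */
static inline int mlxsw_reg_spvmlr_learn_range_example(struct mlxsw_core *mlxsw_core,
						        u8 local_port,
						        u16 vid_begin, u16 vid_end)
{
	char spvmlr_pl[MLXSW_REG_SPVMLR_LEN];

	mlxsw_reg_spvmlr_pack(spvmlr_pl, local_port, vid_begin, vid_end, true);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(spvmlr), spvmlr_pl);
}
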
Ido Schimmel2c63a552016-04-06 17:10:07 +02001818/* QTCT - QoS Switch Traffic Class Table
1819 * -------------------------------------
1820 * Configures the mapping between the packet switch priority and the
1821 * traffic class on the transmit port.
1822 */
1823#define MLXSW_REG_QTCT_ID 0x400A
1824#define MLXSW_REG_QTCT_LEN 0x08
1825
1826static const struct mlxsw_reg_info mlxsw_reg_qtct = {
1827 .id = MLXSW_REG_QTCT_ID,
1828 .len = MLXSW_REG_QTCT_LEN,
1829};
1830
1831/* reg_qtct_local_port
1832 * Local port number.
1833 * Access: Index
1834 *
1835 * Note: CPU port is not supported.
1836 */
1837MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
1838
1839/* reg_qtct_sub_port
1840 * Virtual port within the physical port.
1841 * Should be set to 0 when virtual ports are not enabled on the port.
1842 * Access: Index
1843 */
1844MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8);
1845
1846/* reg_qtct_switch_prio
1847 * Switch priority.
1848 * Access: Index
1849 */
1850MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
1851
1852/* reg_qtct_tclass
1853 * Traffic class.
1854 * Default values:
1855 * switch_prio 0 : tclass 1
1856 * switch_prio 1 : tclass 0
1857 * switch_prio i : tclass i, for i > 1
1858 * Access: RW
1859 */
1860MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
1861
1862static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
1863 u8 switch_prio, u8 tclass)
1864{
1865 MLXSW_REG_ZERO(qtct, payload);
1866 mlxsw_reg_qtct_local_port_set(payload, local_port);
1867 mlxsw_reg_qtct_switch_prio_set(payload, switch_prio);
1868 mlxsw_reg_qtct_tclass_set(payload, tclass);
1869}
1870
Ido Schimmelb9b7cee2016-04-06 17:10:06 +02001871/* QEEC - QoS ETS Element Configuration Register
1872 * ---------------------------------------------
1873 * Configures the ETS elements.
1874 */
1875#define MLXSW_REG_QEEC_ID 0x400D
1876#define MLXSW_REG_QEEC_LEN 0x1C
1877
1878static const struct mlxsw_reg_info mlxsw_reg_qeec = {
1879 .id = MLXSW_REG_QEEC_ID,
1880 .len = MLXSW_REG_QEEC_LEN,
1881};
1882
1883/* reg_qeec_local_port
1884 * Local port number.
1885 * Access: Index
1886 *
1887 * Note: CPU port is supported.
1888 */
1889MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
1890
1891enum mlxsw_reg_qeec_hr {
1892 MLXSW_REG_QEEC_HIERARCY_PORT,
1893 MLXSW_REG_QEEC_HIERARCY_GROUP,
1894 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
1895 MLXSW_REG_QEEC_HIERARCY_TC,
1896};
1897
1898/* reg_qeec_element_hierarchy
1899 * 0 - Port
1900 * 1 - Group
1901 * 2 - Subgroup
1902 * 3 - Traffic Class
1903 * Access: Index
1904 */
1905MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4);
1906
1907/* reg_qeec_element_index
1908 * The index of the element in the hierarchy.
1909 * Access: Index
1910 */
1911MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
1912
1913/* reg_qeec_next_element_index
1914 * The index of the next (lower) element in the hierarchy.
1915 * Access: RW
1916 *
1917 * Note: Reserved for element_hierarchy 0.
1918 */
1919MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
1920
1921enum {
1922 MLXSW_REG_QEEC_BYTES_MODE,
1923 MLXSW_REG_QEEC_PACKETS_MODE,
1924};
1925
1926/* reg_qeec_pb
1927 * Packets or bytes mode.
1928 * 0 - Bytes mode
1929 * 1 - Packets mode
1930 * Access: RW
1931 *
1932 * Note: Used for max shaper configuration. For Spectrum, packets mode
1933 * is supported only for traffic classes of CPU port.
1934 */
1935MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
1936
1937/* reg_qeec_mase
1938 * Max shaper configuration enable. Enables configuration of the max
1939 * shaper on this ETS element.
1940 * 0 - Disable
1941 * 1 - Enable
1942 * Access: RW
1943 */
1944MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
1945
1946/* A large max rate will disable the max shaper. */
1947#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */
1948
1949/* reg_qeec_max_shaper_rate
1950 * Max shaper information rate.
1951 * For CPU port, can only be configured for port hierarchy.
1952 * When in bytes mode, the value is specified in units of 1000 bps.
1953 * Access: RW
1954 */
1955MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
1956
1957/* reg_qeec_de
1958 * DWRR configuration enable. Enables configuration of the dwrr and
1959 * dwrr_weight.
1960 * 0 - Disable
1961 * 1 - Enable
1962 * Access: RW
1963 */
1964MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1);
1965
1966/* reg_qeec_dwrr
1967 * Transmission selection algorithm to use on the link going down from
1968 * the ETS element.
1969 * 0 - Strict priority
1970 * 1 - DWRR
1971 * Access: RW
1972 */
1973MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
1974
1975/* reg_qeec_dwrr_weight
1976 * DWRR weight on the link going down from the ETS element. The
1977 * percentage of bandwidth guaranteed to an ETS element within
1978 * its hierarchy. The sum of all weights across all ETS elements
1979 * within one hierarchy should be equal to 100. Reserved when
1980 * transmission selection algorithm is strict priority.
1981 * Access: RW
1982 */
1983MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
1984
1985static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
1986 enum mlxsw_reg_qeec_hr hr, u8 index,
1987 u8 next_index)
1988{
1989 MLXSW_REG_ZERO(qeec, payload);
1990 mlxsw_reg_qeec_local_port_set(payload, local_port);
1991 mlxsw_reg_qeec_element_hierarchy_set(payload, hr);
1992 mlxsw_reg_qeec_element_index_set(payload, index);
1993 mlxsw_reg_qeec_next_element_index_set(payload, next_index);
1994}
1995
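/* Usage sketch (illustrative only): assigning a DWRR weight to a traffic
 * class element. Weights are percentages and should sum to 100 across the
 * elements of one hierarchy level. The helper name is hypothetical;
 * mlxsw_reg_write() and struct mlxsw_core are assumed from core.h.
 */
static inline int mlxsw_reg_qeec_tc_dwrr_example(struct mlxsw_core *mlxsw_core,
						 u8 local_port, u8 tclass,
						 u8 weight)
{
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	/* Traffic class element tclass, linked to the subgroup element with
	 * the same index.
	 */
	mlxsw_reg_qeec_pack(qeec_pl, local_port, MLXSW_REG_QEEC_HIERARCY_TC,
			    tclass, tclass);
	mlxsw_reg_qeec_de_set(qeec_pl, 1);	/* enable DWRR configuration */
	mlxsw_reg_qeec_dwrr_set(qeec_pl, 1);	/* DWRR rather than strict */
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, weight);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(qeec), qeec_pl);
}
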
Ido Schimmel4ec14b72015-07-29 23:33:48 +02001996/* PMLP - Ports Module to Local Port Register
1997 * ------------------------------------------
1998 * Configures the assignment of modules to local ports.
1999 */
2000#define MLXSW_REG_PMLP_ID 0x5002
2001#define MLXSW_REG_PMLP_LEN 0x40
2002
2003static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
2004 .id = MLXSW_REG_PMLP_ID,
2005 .len = MLXSW_REG_PMLP_LEN,
2006};
2007
2008/* reg_pmlp_rxtx
2009 * 0 - Tx value is used for both Tx and Rx.
2010 * 1 - Rx value is taken from a separate field.
2011 * Access: RW
2012 */
2013MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
2014
2015/* reg_pmlp_local_port
2016 * Local port number.
2017 * Access: Index
2018 */
2019MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
2020
2021/* reg_pmlp_width
2022 * 0 - Unmap local port.
2023 * 1 - Lane 0 is used.
2024 * 2 - Lanes 0 and 1 are used.
2025 * 4 - Lanes 0, 1, 2 and 3 are used.
2026 * Access: RW
2027 */
2028MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
2029
2030/* reg_pmlp_module
2031 * Module number.
2032 * Access: RW
2033 */
Ido Schimmelbbeeda22016-01-27 15:20:26 +01002034MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002035
2036/* reg_pmlp_tx_lane
2037 * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
2038 * Access: RW
2039 */
Ido Schimmelbbeeda22016-01-27 15:20:26 +01002040MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002041
2042/* reg_pmlp_rx_lane
2043 * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
2044 * equal to Tx lane.
2045 * Access: RW
2046 */
Ido Schimmelbbeeda22016-01-27 15:20:26 +01002047MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002048
2049static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
2050{
2051 MLXSW_REG_ZERO(pmlp, payload);
2052 mlxsw_reg_pmlp_local_port_set(payload, local_port);
2053}
2054
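/* Usage sketch (illustrative only): querying which front panel module and
 * Tx lane the first lane of a local port is mapped to. The helper name is
 * hypothetical; mlxsw_reg_query() and struct mlxsw_core are assumed from
 * core.h.
 */
static inline int mlxsw_reg_pmlp_module_query_example(struct mlxsw_core *mlxsw_core,
						       u8 local_port,
						       u8 *p_module, u8 *p_tx_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);	/* lane 0 */
	*p_tx_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
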
2055/* PMTU - Port MTU Register
2056 * ------------------------
2057 * Configures and reports the port MTU.
2058 */
2059#define MLXSW_REG_PMTU_ID 0x5003
2060#define MLXSW_REG_PMTU_LEN 0x10
2061
2062static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
2063 .id = MLXSW_REG_PMTU_ID,
2064 .len = MLXSW_REG_PMTU_LEN,
2065};
2066
2067/* reg_pmtu_local_port
2068 * Local port number.
2069 * Access: Index
2070 */
2071MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
2072
2073/* reg_pmtu_max_mtu
2074 * Maximum MTU.
2075 * When port type (e.g. Ethernet) is configured, the relevant MTU is
2076 * reported, otherwise the minimum between the max_mtu of the different
2077 * types is reported.
2078 * Access: RO
2079 */
2080MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
2081
2082/* reg_pmtu_admin_mtu
2083 * MTU value to set port to. Must be smaller or equal to max_mtu.
2084 * Note: If port type is Infiniband, then port must be disabled, when its
2085 * MTU is set.
2086 * Access: RW
2087 */
2088MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
2089
2090/* reg_pmtu_oper_mtu
2091 * The actual MTU configured on the port. Packets exceeding this size
2092 * will be dropped.
2093 * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband
2094 * oper_mtu might be smaller than admin_mtu.
2095 * Access: RO
2096 */
2097MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
2098
2099static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
2100 u16 new_mtu)
2101{
2102 MLXSW_REG_ZERO(pmtu, payload);
2103 mlxsw_reg_pmtu_local_port_set(payload, local_port);
2104 mlxsw_reg_pmtu_max_mtu_set(payload, 0);
2105 mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
2106 mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
2107}
2108
2109/* PTYS - Port Type and Speed Register
2110 * -----------------------------------
2111 * Configures and reports the port speed type.
2112 *
2113 * Note: When set while the link is up, the changes will not take effect
2114 * until the port transitions from down to up state.
2115 */
2116#define MLXSW_REG_PTYS_ID 0x5004
2117#define MLXSW_REG_PTYS_LEN 0x40
2118
2119static const struct mlxsw_reg_info mlxsw_reg_ptys = {
2120 .id = MLXSW_REG_PTYS_ID,
2121 .len = MLXSW_REG_PTYS_LEN,
2122};
2123
2124/* reg_ptys_local_port
2125 * Local port number.
2126 * Access: Index
2127 */
2128MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
2129
2130#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
2131
2132/* reg_ptys_proto_mask
2133 * Protocol mask. Indicates which protocol is used.
2134 * 0 - Infiniband.
2135 * 1 - Fibre Channel.
2136 * 2 - Ethernet.
2137 * Access: Index
2138 */
2139MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
2140
Ido Schimmel4149b972016-09-12 13:26:24 +02002141enum {
2142 MLXSW_REG_PTYS_AN_STATUS_NA,
2143 MLXSW_REG_PTYS_AN_STATUS_OK,
2144 MLXSW_REG_PTYS_AN_STATUS_FAIL,
2145};
2146
2147/* reg_ptys_an_status
2148 * Autonegotiation status.
2149 * Access: RO
2150 */
2151MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
2152
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002153#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
2154#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
2155#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
2156#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
2157#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
2158#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
2159#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
2160#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
2161#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8)
2162#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
2163#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13)
2164#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
2165#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
2166#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002167#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2 BIT(18)
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002168#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
2169#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
2170#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
2171#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
2172#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
2173#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24)
2174#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25)
2175#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26)
2176#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
2177#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
2178#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
2179#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 BIT(30)
2180#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2 BIT(31)
2181
2182/* reg_ptys_eth_proto_cap
2183 * Ethernet port supported speeds and protocols.
2184 * Access: RO
2185 */
2186MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
2187
2188/* reg_ptys_eth_proto_admin
2189 * Speed and protocol to set port to.
2190 * Access: RW
2191 */
2192MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
2193
2194/* reg_ptys_eth_proto_oper
2195 * The current speed and protocol configured for the port.
2196 * Access: RO
2197 */
2198MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
2199
Ido Schimmel4149b972016-09-12 13:26:24 +02002200/* reg_ptys_eth_proto_lp_advertise
2201 * The protocols that were advertised by the link partner during
2202 * autonegotiation.
2203 * Access: RO
2204 */
2205MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
2206
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002207static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
2208 u32 proto_admin)
2209{
2210 MLXSW_REG_ZERO(ptys, payload);
2211 mlxsw_reg_ptys_local_port_set(payload, local_port);
2212 mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
2213 mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
2214}
2215
2216static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
2217 u32 *p_eth_proto_adm,
2218 u32 *p_eth_proto_oper)
2219{
2220 if (p_eth_proto_cap)
2221 *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
2222 if (p_eth_proto_adm)
2223 *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
2224 if (p_eth_proto_oper)
2225 *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
2226}
2227
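/* Usage sketch (illustrative only): querying the Ethernet capabilities and
 * the operational protocol/speed of a port. The helper name is
 * hypothetical; mlxsw_reg_query() and struct mlxsw_core are assumed from
 * core.h.
 */
static inline int mlxsw_reg_ptys_speed_query_example(struct mlxsw_core *mlxsw_core,
						      u8 local_port,
						      u32 *p_cap, u32 *p_oper)
{
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	/* A proto_admin of 0 leaves the admin speeds untouched on query. */
	mlxsw_reg_ptys_pack(ptys_pl, local_port, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_unpack(ptys_pl, p_cap, NULL, p_oper);
	return 0;
}
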
2228/* PPAD - Port Physical Address Register
2229 * -------------------------------------
2230 * The PPAD register configures the per port physical MAC address.
2231 */
2232#define MLXSW_REG_PPAD_ID 0x5005
2233#define MLXSW_REG_PPAD_LEN 0x10
2234
2235static const struct mlxsw_reg_info mlxsw_reg_ppad = {
2236 .id = MLXSW_REG_PPAD_ID,
2237 .len = MLXSW_REG_PPAD_LEN,
2238};
2239
2240/* reg_ppad_single_base_mac
2241 * 0 - base_mac: local_port should be 0 and mac[7:0] is reserved.
2242 * HW will assign the per-port MAC addresses incrementally.
2243 * 1 - single_mac: the MAC address of the given local_port.
2244 * Access: RW
2245 */
2246MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
2247
2248/* reg_ppad_local_port
2249 * Port number. If single_base_mac = 0, then local_port is reserved.
2250 * Access: RW
2251 */
2252MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
2253
2254/* reg_ppad_mac
2255 * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
2256 * If single_base_mac = 1 - the per port MAC address
2257 * Access: RW
2258 */
2259MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
2260
2261static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
2262 u8 local_port)
2263{
2264 MLXSW_REG_ZERO(ppad, payload);
2265 mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
2266 mlxsw_reg_ppad_local_port_set(payload, local_port);
2267}
2268
2269/* PAOS - Ports Administrative and Operational Status Register
2270 * -----------------------------------------------------------
2271 * Configures and retrieves per port administrative and operational status.
2272 */
2273#define MLXSW_REG_PAOS_ID 0x5006
2274#define MLXSW_REG_PAOS_LEN 0x10
2275
2276static const struct mlxsw_reg_info mlxsw_reg_paos = {
2277 .id = MLXSW_REG_PAOS_ID,
2278 .len = MLXSW_REG_PAOS_LEN,
2279};
2280
2281/* reg_paos_swid
2282 * Switch partition ID with which to associate the port.
2283 * Note: while external ports use unique local port numbers (and thus swid is
2284 * redundant), router ports use the same local port number where swid is the
2285 * only indication for the relevant port.
2286 * Access: Index
2287 */
2288MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
2289
2290/* reg_paos_local_port
2291 * Local port number.
2292 * Access: Index
2293 */
2294MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
2295
2296/* reg_paos_admin_status
2297 * Port administrative state (the desired state of the port):
2298 * 1 - Up.
2299 * 2 - Down.
2300 * 3 - Up once. This means that in case of link failure, the port won't go
2301 * into polling mode, but will wait to be re-enabled by software.
2302 * 4 - Disabled by system. Can only be set by hardware.
2303 * Access: RW
2304 */
2305MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
2306
2307/* reg_paos_oper_status
2308 * Port operational state (the current state):
2309 * 1 - Up.
2310 * 2 - Down.
2311 * 3 - Down by port failure. This means that the device will not let the
2312 * port up again until explicitly specified by software.
2313 * Access: RO
2314 */
2315MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
2316
2317/* reg_paos_ase
2318 * Admin state update enabled.
2319 * Access: WO
2320 */
2321MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
2322
2323/* reg_paos_ee
2324 * Event update enable. If this bit is set, event generation will be
2325 * updated based on the e field.
2326 * Access: WO
2327 */
2328MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
2329
2330/* reg_paos_e
2331 * Event generation on operational state change:
2332 * 0 - Do not generate event.
2333 * 1 - Generate Event.
2334 * 2 - Generate Single Event.
2335 * Access: RW
2336 */
2337MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
2338
2339static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
2340 enum mlxsw_port_admin_status status)
2341{
2342 MLXSW_REG_ZERO(paos, payload);
2343 mlxsw_reg_paos_swid_set(payload, 0);
2344 mlxsw_reg_paos_local_port_set(payload, local_port);
2345 mlxsw_reg_paos_admin_status_set(payload, status);
2346 mlxsw_reg_paos_oper_status_set(payload, 0);
2347 mlxsw_reg_paos_ase_set(payload, 1);
2348 mlxsw_reg_paos_ee_set(payload, 1);
2349 mlxsw_reg_paos_e_set(payload, 1);
2350}
2351
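/* Usage sketch (illustrative only): administratively bringing a port up or
 * down. The helper name is hypothetical; the MLXSW_PORT_ADMIN_STATUS_*
 * values are assumed to come from port.h (included above), and
 * mlxsw_reg_write() with struct mlxsw_core from core.h.
 */
static inline int mlxsw_reg_paos_admin_example(struct mlxsw_core *mlxsw_core,
					       u8 local_port, bool up)
{
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, local_port,
			    up ? MLXSW_PORT_ADMIN_STATUS_UP :
				 MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(paos), paos_pl);
}
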
Ido Schimmel6f253d82016-04-06 17:10:12 +02002352/* PFCC - Ports Flow Control Configuration Register
2353 * ------------------------------------------------
2354 * Configures and retrieves the per port flow control configuration.
2355 */
2356#define MLXSW_REG_PFCC_ID 0x5007
2357#define MLXSW_REG_PFCC_LEN 0x20
2358
2359static const struct mlxsw_reg_info mlxsw_reg_pfcc = {
2360 .id = MLXSW_REG_PFCC_ID,
2361 .len = MLXSW_REG_PFCC_LEN,
2362};
2363
2364/* reg_pfcc_local_port
2365 * Local port number.
2366 * Access: Index
2367 */
2368MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
2369
2370/* reg_pfcc_pnat
2371 * Port number access type. Determines the way local_port is interpreted:
2372 * 0 - Local port number.
2373 * 1 - IB / label port number.
2374 * Access: Index
2375 */
2376MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2);
2377
2378/* reg_pfcc_shl_cap
2379 * Send to higher layers capabilities:
2380 * 0 - No capability of sending Pause and PFC frames to higher layers.
2381 * 1 - Device has capability of sending Pause and PFC frames to higher
2382 * layers.
2383 * Access: RO
2384 */
2385MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1);
2386
2387/* reg_pfcc_shl_opr
2388 * Send to higher layers operation:
2389 * 0 - Pause and PFC frames are handled by the port (default).
2390 * 1 - Pause and PFC frames are handled by the port and also sent to
2391 * higher layers. Only valid if shl_cap = 1.
2392 * Access: RW
2393 */
2394MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1);
2395
2396/* reg_pfcc_ppan
2397 * Pause policy auto negotiation.
2398 * 0 - Disabled. Generate / ignore Pause frames based on pptx / pprtx.
2399 * 1 - Enabled. When auto-negotiation is performed, set the Pause policy
2400 * based on the auto-negotiation resolution.
2401 * Access: RW
2402 *
2403 * Note: The auto-negotiation advertisement is set according to pptx and
2404 * pprtx. When PFC is set on Tx / Rx, ppan must be set to 0.
2405 */
2406MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4);
2407
2408/* reg_pfcc_prio_mask_tx
2409 * Bit per priority indicating if Tx flow control policy should be
2410 * updated based on bit pfctx.
2411 * Access: WO
2412 */
2413MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8);
2414
2415/* reg_pfcc_prio_mask_rx
2416 * Bit per priority indicating if Rx flow control policy should be
2417 * updated based on bit pfcrx.
2418 * Access: WO
2419 */
2420MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8);
2421
2422/* reg_pfcc_pptx
2423 * Admin Pause policy on Tx.
2424 * 0 - Never generate Pause frames (default).
2425 * 1 - Generate Pause frames according to Rx buffer threshold.
2426 * Access: RW
2427 */
2428MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1);
2429
2430/* reg_pfcc_aptx
2431 * Active (operational) Pause policy on Tx.
2432 * 0 - Never generate Pause frames.
2433 * 1 - Generate Pause frames according to Rx buffer threshold.
2434 * Access: RO
2435 */
2436MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1);
2437
2438/* reg_pfcc_pfctx
2439 * Priority based flow control policy on Tx[7:0]. Per-priority bit mask:
2440 * 0 - Never generate priority Pause frames on the specified priority
2441 * (default).
2442 * 1 - Generate priority Pause frames according to Rx buffer threshold on
2443 * the specified priority.
2444 * Access: RW
2445 *
2446 * Note: pfctx and pptx must be mutually exclusive.
2447 */
2448MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8);
2449
2450/* reg_pfcc_pprx
2451 * Admin Pause policy on Rx.
2452 * 0 - Ignore received Pause frames (default).
2453 * 1 - Respect received Pause frames.
2454 * Access: RW
2455 */
2456MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1);
2457
2458/* reg_pfcc_aprx
2459 * Active (operational) Pause policy on Rx.
2460 * 0 - Ignore received Pause frames.
2461 * 1 - Respect received Pause frames.
2462 * Access: RO
2463 */
2464MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1);
2465
2466/* reg_pfcc_pfcrx
2467 * Priority based flow control policy on Rx[7:0]. Per-priority bit mask:
2468 * 0 - Ignore incoming priority Pause frames on the specified priority
2469 * (default).
2470 * 1 - Respect incoming priority Pause frames on the specified priority.
2471 * Access: RW
2472 */
2473MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8);
2474
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02002475#define MLXSW_REG_PFCC_ALL_PRIO 0xFF
2476
2477static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
2478{
2479 mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
2480 mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
2481 mlxsw_reg_pfcc_pfctx_set(payload, pfc_en);
2482 mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
2483}
2484
Ido Schimmel6f253d82016-04-06 17:10:12 +02002485static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
2486{
2487 MLXSW_REG_ZERO(pfcc, payload);
2488 mlxsw_reg_pfcc_local_port_set(payload, local_port);
2489}
2490
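/* Usage sketch (illustrative only): enabling PFC for the priorities set in
 * the pfc_en bitmap (e.g. BIT(3) for priority 3). Per the ppan note above,
 * PFC and global pause should not be enabled together. The helper name is
 * hypothetical; mlxsw_reg_write() and struct mlxsw_core are assumed from
 * core.h.
 */
static inline int mlxsw_reg_pfcc_pfc_set_example(struct mlxsw_core *mlxsw_core,
						 u8 local_port, u8 pfc_en)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, local_port);
	mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc_en);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pfcc), pfcc_pl);
}
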
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002491/* PPCNT - Ports Performance Counters Register
2492 * -------------------------------------------
2493 * The PPCNT register retrieves per port performance counters.
2494 */
2495#define MLXSW_REG_PPCNT_ID 0x5008
2496#define MLXSW_REG_PPCNT_LEN 0x100
2497
2498static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
2499 .id = MLXSW_REG_PPCNT_ID,
2500 .len = MLXSW_REG_PPCNT_LEN,
2501};
2502
2503/* reg_ppcnt_swid
2504 * For HCA: must always be 0.
2505 * Switch partition ID to associate port with.
2506 * Switch partitions are numbered from 0 to 7 inclusively.
2507 * Switch partition 254 indicates stacking ports.
2508 * Switch partition 255 indicates all switch partitions.
2509 * Only valid on Set() operation with local_port=255.
2510 * Access: Index
2511 */
2512MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
2513
2514/* reg_ppcnt_local_port
2515 * Local port number.
2516 * 255 indicates all ports on the device, and is only allowed
2517 * for Set() operation.
2518 * Access: Index
2519 */
2520MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
2521
2522/* reg_ppcnt_pnat
2523 * Port number access type:
2524 * 0 - Local port number
2525 * 1 - IB port number
2526 * Access: Index
2527 */
2528MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
2529
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002530enum mlxsw_reg_ppcnt_grp {
2531 MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
2532 MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002533 MLXSW_REG_PPCNT_TC_CNT = 0x11,
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002534};
2535
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002536/* reg_ppcnt_grp
2537 * Performance counter group.
2538 * Group 63 indicates all groups. Only valid on Set() operation with
2539 * clr bit set.
2540 * 0x0: IEEE 802.3 Counters
2541 * 0x1: RFC 2863 Counters
2542 * 0x2: RFC 2819 Counters
2543 * 0x3: RFC 3635 Counters
2544 * 0x5: Ethernet Extended Counters
2545 * 0x8: Link Level Retransmission Counters
2546 * 0x10: Per Priority Counters
2547 * 0x11: Per Traffic Class Counters
2548 * 0x12: Physical Layer Counters
2549 * Access: Index
2550 */
2551MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
2552
2553/* reg_ppcnt_clr
2554 * Clear counters. Setting the clr bit will reset the counter value
2555 * for all counters in the counter group. This bit can be set
2556 * for both Set() and Get() operation.
2557 * Access: OP
2558 */
2559MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
2560
2561/* reg_ppcnt_prio_tc
2562 * Priority for counter sets that support per priority; valid values: 0-7.
2563 * Traffic class for counter sets that support per traffic class;
2564 * valid values: 0 to (cap_max_tclass - 1).
2565 * For HCA: cap_max_tclass is always 8.
2566 * Otherwise must be 0.
2567 * Access: Index
2568 */
2569MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
2570
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002571/* Ethernet IEEE 802.3 Counter Group */
2572
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002573/* reg_ppcnt_a_frames_transmitted_ok
2574 * Access: RO
2575 */
2576MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
2577 0x08 + 0x00, 0, 64);
2578
2579/* reg_ppcnt_a_frames_received_ok
2580 * Access: RO
2581 */
2582MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
2583 0x08 + 0x08, 0, 64);
2584
2585/* reg_ppcnt_a_frame_check_sequence_errors
2586 * Access: RO
2587 */
2588MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
2589 0x08 + 0x10, 0, 64);
2590
2591/* reg_ppcnt_a_alignment_errors
2592 * Access: RO
2593 */
2594MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
2595 0x08 + 0x18, 0, 64);
2596
2597/* reg_ppcnt_a_octets_transmitted_ok
2598 * Access: RO
2599 */
2600MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
2601 0x08 + 0x20, 0, 64);
2602
2603/* reg_ppcnt_a_octets_received_ok
2604 * Access: RO
2605 */
2606MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
2607 0x08 + 0x28, 0, 64);
2608
2609/* reg_ppcnt_a_multicast_frames_xmitted_ok
2610 * Access: RO
2611 */
2612MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
2613 0x08 + 0x30, 0, 64);
2614
2615/* reg_ppcnt_a_broadcast_frames_xmitted_ok
2616 * Access: RO
2617 */
2618MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
2619 0x08 + 0x38, 0, 64);
2620
2621/* reg_ppcnt_a_multicast_frames_received_ok
2622 * Access: RO
2623 */
2624MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
2625 0x08 + 0x40, 0, 64);
2626
2627/* reg_ppcnt_a_broadcast_frames_received_ok
2628 * Access: RO
2629 */
2630MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
2631 0x08 + 0x48, 0, 64);
2632
2633/* reg_ppcnt_a_in_range_length_errors
2634 * Access: RO
2635 */
2636MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
2637 0x08 + 0x50, 0, 64);
2638
2639/* reg_ppcnt_a_out_of_range_length_field
2640 * Access: RO
2641 */
2642MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
2643 0x08 + 0x58, 0, 64);
2644
2645/* reg_ppcnt_a_frame_too_long_errors
2646 * Access: RO
2647 */
2648MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
2649 0x08 + 0x60, 0, 64);
2650
2651/* reg_ppcnt_a_symbol_error_during_carrier
2652 * Access: RO
2653 */
2654MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
2655 0x08 + 0x68, 0, 64);
2656
2657/* reg_ppcnt_a_mac_control_frames_transmitted
2658 * Access: RO
2659 */
2660MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
2661 0x08 + 0x70, 0, 64);
2662
2663/* reg_ppcnt_a_mac_control_frames_received
2664 * Access: RO
2665 */
2666MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
2667 0x08 + 0x78, 0, 64);
2668
2669/* reg_ppcnt_a_unsupported_opcodes_received
2670 * Access: RO
2671 */
2672MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
2673 0x08 + 0x80, 0, 64);
2674
2675/* reg_ppcnt_a_pause_mac_ctrl_frames_received
2676 * Access: RO
2677 */
2678MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
2679 0x08 + 0x88, 0, 64);
2680
2681/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
2682 * Access: RO
2683 */
2684MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
2685 0x08 + 0x90, 0, 64);
2686
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002687/* Ethernet Per Priority Group Counters */
2688
2689/* reg_ppcnt_rx_octets
2690 * Access: RO
2691 */
2692MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64);
2693
2694/* reg_ppcnt_rx_frames
2695 * Access: RO
2696 */
2697MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64);
2698
2699/* reg_ppcnt_tx_octets
2700 * Access: RO
2701 */
2702MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64);
2703
2704/* reg_ppcnt_tx_frames
2705 * Access: RO
2706 */
2707MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64);
2708
2709/* reg_ppcnt_rx_pause
2710 * Access: RO
2711 */
2712MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64);
2713
2714/* reg_ppcnt_rx_pause_duration
2715 * Access: RO
2716 */
2717MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64);
2718
2719/* reg_ppcnt_tx_pause
2720 * Access: RO
2721 */
2722MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64);
2723
2724/* reg_ppcnt_tx_pause_duration
2725 * Access: RO
2726 */
2727MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
2728
2729/* reg_ppcnt_tx_pause_transition
2730 * Access: RO
2731 */
2732MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
2733
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002734/* Ethernet Per Traffic Group Counters */
2735
2736/* reg_ppcnt_tc_transmit_queue
2737 * Contains the transmit queue depth in cells of traffic class
2738 * selected by prio_tc and the port selected by local_port.
2739 * The field cannot be cleared.
2740 * Access: RO
2741 */
2742MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64);
2743
2744/* reg_ppcnt_tc_no_buffer_discard_uc
2745 * The number of unicast packets dropped due to lack of shared
2746 * buffer resources.
2747 * Access: RO
2748 */
2749MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64);
2750
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002751static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
2752 enum mlxsw_reg_ppcnt_grp grp,
2753 u8 prio_tc)
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002754{
2755 MLXSW_REG_ZERO(ppcnt, payload);
2756 mlxsw_reg_ppcnt_swid_set(payload, 0);
2757 mlxsw_reg_ppcnt_local_port_set(payload, local_port);
2758 mlxsw_reg_ppcnt_pnat_set(payload, 0);
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002759 mlxsw_reg_ppcnt_grp_set(payload, grp);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002760 mlxsw_reg_ppcnt_clr_set(payload, 0);
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002761 mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002762}
2763
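/* Usage sketch (illustrative only): reading the IEEE 802.3 counter group
 * of a port and extracting a single counter. The helper name is
 * hypothetical; mlxsw_reg_query() and struct mlxsw_core are assumed from
 * core.h.
 */
static inline int mlxsw_reg_ppcnt_ieee_query_example(struct mlxsw_core *mlxsw_core,
						     u8 local_port,
						     u64 *p_tx_frames)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ppcnt), ppcnt_pl);
	if (err)
		return err;
	*p_tx_frames = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	return 0;
}
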
Ido Schimmelb98ff152016-04-06 17:10:00 +02002764/* PPTB - Port Prio To Buffer Register
2765 * -----------------------------------
2766 * Configures the switch priority to buffer table.
2767 */
2768#define MLXSW_REG_PPTB_ID 0x500B
Ido Schimmel11719a52016-07-15 11:15:02 +02002769#define MLXSW_REG_PPTB_LEN 0x10
Ido Schimmelb98ff152016-04-06 17:10:00 +02002770
2771static const struct mlxsw_reg_info mlxsw_reg_pptb = {
2772 .id = MLXSW_REG_PPTB_ID,
2773 .len = MLXSW_REG_PPTB_LEN,
2774};
2775
2776enum {
2777 MLXSW_REG_PPTB_MM_UM,
2778 MLXSW_REG_PPTB_MM_UNICAST,
2779 MLXSW_REG_PPTB_MM_MULTICAST,
2780};
2781
2782/* reg_pptb_mm
2783 * Mapping mode.
2784 * 0 - Map both unicast and multicast packets to the same buffer.
2785 * 1 - Map only unicast packets.
2786 * 2 - Map only multicast packets.
2787 * Access: Index
2788 *
2789 * Note: SwitchX-2 only supports the first option.
2790 */
2791MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
2792
2793/* reg_pptb_local_port
2794 * Local port number.
2795 * Access: Index
2796 */
2797MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
2798
2799/* reg_pptb_um
2800 * Enables the update of the untagged_buf field.
2801 * Access: RW
2802 */
2803MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1);
2804
2805/* reg_pptb_pm
2806 * Enables the update of the prio_to_buff field.
2807 * Bit <i> is a flag for updating the mapping for switch priority <i>.
2808 * Access: RW
2809 */
2810MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8);
2811
2812/* reg_pptb_prio_to_buff
2813 * Mapping of switch priority <i> to one of the allocated receive port
2814 * buffers.
2815 * Access: RW
2816 */
2817MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4);
2818
2819/* reg_pptb_pm_msb
2820 * Enables the update of the prio_to_buff field.
2821 * Bit <i> is a flag for updating the mapping for switch priority <i+8>.
2822 * Access: RW
2823 */
2824MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
2825
2826/* reg_pptb_untagged_buff
2827 * Mapping of untagged frames to one of the allocated receive port buffers.
2828 * Access: RW
2829 *
2830 * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for
2831 * Spectrum, as it maps untagged packets based on the default switch priority.
2832 */
2833MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
2834
Ido Schimmel11719a52016-07-15 11:15:02 +02002835/* reg_pptb_prio_to_buff_msb
2836 * Mapping of switch priority <i+8> to one of the allocated receive port
2837 * buffers.
2838 * Access: RW
2839 */
2840MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
2841
Ido Schimmelb98ff152016-04-06 17:10:00 +02002842#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
2843
2844static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
2845{
2846 MLXSW_REG_ZERO(pptb, payload);
2847 mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
2848 mlxsw_reg_pptb_local_port_set(payload, local_port);
2849 mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
Ido Schimmel11719a52016-07-15 11:15:02 +02002850 mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
2851}
2852
2853static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
2854 u8 buff)
2855{
2856 mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
2857 mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
Ido Schimmelb98ff152016-04-06 17:10:00 +02002858}
2859
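/* Usage sketch (illustrative only): mapping every switch priority of a
 * port to headroom buffer 0, the default when no PFC policy is configured.
 * The prio_to_buff_pack helper above also mirrors priority i into priority
 * i+8 via the msb field, so a loop over 0-7 covers all priorities. The
 * helper name is hypothetical; mlxsw_reg_write() and struct mlxsw_core are
 * assumed from core.h.
 */
static inline int mlxsw_reg_pptb_default_example(struct mlxsw_core *mlxsw_core,
						 u8 local_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	u8 prio;

	mlxsw_reg_pptb_pack(pptb_pl, local_port);
	for (prio = 0; prio < 8; prio++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pptb), pptb_pl);
}
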
Jiri Pirkoe0594362015-10-16 14:01:31 +02002860/* PBMC - Port Buffer Management Control Register
2861 * ----------------------------------------------
2862 * The PBMC register configures and retrieves the port packet buffer
2863 * allocation for different Prios, and the Pause threshold management.
2864 */
2865#define MLXSW_REG_PBMC_ID 0x500C
Ido Schimmel7ad7cd62016-04-06 17:10:04 +02002866#define MLXSW_REG_PBMC_LEN 0x6C
Jiri Pirkoe0594362015-10-16 14:01:31 +02002867
2868static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
2869 .id = MLXSW_REG_PBMC_ID,
2870 .len = MLXSW_REG_PBMC_LEN,
2871};
2872
2873/* reg_pbmc_local_port
2874 * Local port number.
2875 * Access: Index
2876 */
2877MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
2878
2879/* reg_pbmc_xoff_timer_value
2880 * When device generates a pause frame, it uses this value as the pause
2881 * timer (time for the peer port to pause in quota-512 bit time).
2882 * Access: RW
2883 */
2884MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
2885
2886/* reg_pbmc_xoff_refresh
2887 * The time before a new pause frame should be sent to refresh the pause
2888 * state. Uses the same units as xoff_timer_value above (in quota-512 bit
2889 * time).
2890 * Access: RW
2891 */
2892MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
2893
Ido Schimmeld6b7c132016-04-06 17:10:05 +02002894#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11
2895
Jiri Pirkoe0594362015-10-16 14:01:31 +02002896/* reg_pbmc_buf_lossy
2897 * The field indicates if the buffer is lossy.
2898 * 0 - Lossless
2899 * 1 - Lossy
2900 * Access: RW
2901 */
2902MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false);
2903
2904/* reg_pbmc_buf_epsb
2905 * Eligible for Port Shared buffer.
2906 * If epsb is set, packets assigned to the buffer are allowed to use the
2907 * port shared buffer.
2908 * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved.
2909 * Access: RW
2910 */
2911MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
2912
2913/* reg_pbmc_buf_size
2914 * The part of the packet buffer array is allocated for the specific buffer.
2915 * Units are represented in cells.
2916 * Access: RW
2917 */
2918MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
2919
Ido Schimmel155f9de2016-04-06 17:10:13 +02002920/* reg_pbmc_buf_xoff_threshold
2921 * Once the amount of data in the buffer goes above this value, device
2922 * starts sending PFC frames for all priorities associated with the
2923 * buffer. Units are represented in cells. Reserved in case of lossy
2924 * buffer.
2925 * Access: RW
2926 *
2927 * Note: In Spectrum, reserved for buffer[9].
2928 */
2929MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
2930 0x08, 0x04, false);
2931
2932/* reg_pbmc_buf_xon_threshold
2933 * When the amount of data in the buffer goes below this value, device
2934 * stops sending PFC frames for the priorities associated with the
2935 * buffer. Units are represented in cells. Reserved in case of lossy
2936 * buffer.
2937 * Access: RW
2938 *
2939 * Note: In Spectrum, reserved for buffer[9].
2940 */
2941MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
2942 0x08, 0x04, false);
2943
Jiri Pirkoe0594362015-10-16 14:01:31 +02002944static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
2945 u16 xoff_timer_value, u16 xoff_refresh)
2946{
2947 MLXSW_REG_ZERO(pbmc, payload);
2948 mlxsw_reg_pbmc_local_port_set(payload, local_port);
2949 mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value);
2950 mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh);
2951}
2952
2953static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
2954 int buf_index,
2955 u16 size)
2956{
2957 mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1);
2958 mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
2959 mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
2960}
2961
Ido Schimmel155f9de2016-04-06 17:10:13 +02002962static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload,
2963 int buf_index, u16 size,
2964 u16 threshold)
2965{
2966 mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0);
2967 mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
2968 mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
2969 mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold);
2970 mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold);
2971}
2972
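/* Usage sketch (illustrative only): read-modify-write of the PBMC register
 * to turn headroom buffer 0 of a port into a lossless buffer with the
 * given size and Xon/Xoff threshold, all in cells, without touching the
 * other buffers. The helper name is hypothetical; mlxsw_reg_query(),
 * mlxsw_reg_write() and struct mlxsw_core are assumed from core.h.
 */
static inline int mlxsw_reg_pbmc_lossless_example(struct mlxsw_core *mlxsw_core,
						   u8 local_port, u16 size,
						   u16 threshold)
{
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int err;

	mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;
	mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, 0, size, threshold);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pbmc), pbmc_pl);
}
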
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002973/* PSPA - Port Switch Partition Allocation
2974 * ---------------------------------------
2975 * Controls the association of a port with a switch partition and enables
2976 * configuring ports as stacking ports.
2977 */
Jiri Pirko3f0effd2015-10-15 17:43:23 +02002978#define MLXSW_REG_PSPA_ID 0x500D
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002979#define MLXSW_REG_PSPA_LEN 0x8
2980
2981static const struct mlxsw_reg_info mlxsw_reg_pspa = {
2982 .id = MLXSW_REG_PSPA_ID,
2983 .len = MLXSW_REG_PSPA_LEN,
2984};
2985
2986/* reg_pspa_swid
2987 * Switch partition ID.
2988 * Access: RW
2989 */
2990MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
2991
2992/* reg_pspa_local_port
2993 * Local port number.
2994 * Access: Index
2995 */
2996MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
2997
2998/* reg_pspa_sub_port
2999 * Virtual port within the local port. Set to 0 when virtual ports are
3000 * disabled on the local port.
3001 * Access: Index
3002 */
3003MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
3004
3005static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
3006{
3007 MLXSW_REG_ZERO(pspa, payload);
3008 mlxsw_reg_pspa_swid_set(payload, swid);
3009 mlxsw_reg_pspa_local_port_set(payload, local_port);
3010 mlxsw_reg_pspa_sub_port_set(payload, 0);
3011}
3012
3013/* HTGT - Host Trap Group Table
3014 * ----------------------------
3015 * Configures the properties for forwarding to CPU.
3016 */
3017#define MLXSW_REG_HTGT_ID 0x7002
3018#define MLXSW_REG_HTGT_LEN 0x100
3019
3020static const struct mlxsw_reg_info mlxsw_reg_htgt = {
3021 .id = MLXSW_REG_HTGT_ID,
3022 .len = MLXSW_REG_HTGT_LEN,
3023};
3024
3025/* reg_htgt_swid
3026 * Switch partition ID.
3027 * Access: Index
3028 */
3029MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
3030
3031#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0 /* For locally attached CPU */
3032
3033/* reg_htgt_type
3034 * CPU path type.
3035 * Access: RW
3036 */
3037MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
3038
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003039enum mlxsw_reg_htgt_trap_group {
3040 MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3041 MLXSW_REG_HTGT_TRAP_GROUP_RX,
3042 MLXSW_REG_HTGT_TRAP_GROUP_CTRL,
3043};
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003044
3045/* reg_htgt_trap_group
3046 * Trap group number. User defined number specifying which trap groups
3047 * should be forwarded to the CPU. The mapping between trap IDs and trap
3048 * groups is configured using HPKT register.
3049 * Access: Index
3050 */
3051MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
3052
3053enum {
3054 MLXSW_REG_HTGT_POLICER_DISABLE,
3055 MLXSW_REG_HTGT_POLICER_ENABLE,
3056};
3057
3058/* reg_htgt_pide
3059 * Enable policer ID specified using 'pid' field.
3060 * Access: RW
3061 */
3062MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
3063
3064/* reg_htgt_pid
3065 * Policer ID for the trap group.
3066 * Access: RW
3067 */
3068MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
3069
3070#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
3071
3072/* reg_htgt_mirror_action
3073 * Mirror action to use.
3074 * 0 - Trap to CPU.
3075 * 1 - Trap to CPU and mirror to a mirroring agent.
3076 * 2 - Mirror to a mirroring agent and do not trap to CPU.
3077 * Access: RW
3078 *
3079 * Note: Mirroring to a mirroring agent is only supported in Spectrum.
3080 */
3081MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
3082
3083/* reg_htgt_mirroring_agent
3084 * Mirroring agent.
3085 * Access: RW
3086 */
3087MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
3088
3089/* reg_htgt_priority
3090 * Trap group priority.
3091 * In case a packet matches multiple classification rules, the packet will
3092 * only be trapped once, based on the trap ID associated with the group (via
3093 * register HPKT) with the highest priority.
3094 * Supported values are 0-7, with 7 representing the highest priority.
3095 * Access: RW
3096 *
3097 * Note: In SwitchX-2 this field is ignored and the priority value is replaced
3098 * by the 'trap_group' field.
3099 */
3100MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
3101
3102/* reg_htgt_local_path_cpu_tclass
3103 * CPU ingress traffic class for the trap group.
3104 * Access: RW
3105 */
3106MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
3107
3108#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
3109#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003110#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003111
3112/* reg_htgt_local_path_rdq
3113 * Receive descriptor queue (RDQ) to use for the trap group.
3114 * Access: RW
3115 */
3116MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
3117
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003118static inline void mlxsw_reg_htgt_pack(char *payload,
3119 enum mlxsw_reg_htgt_trap_group group)
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003120{
3121 u8 swid, rdq;
3122
3123 MLXSW_REG_ZERO(htgt, payload);
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003124 switch (group) {
3125 case MLXSW_REG_HTGT_TRAP_GROUP_EMAD:
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003126 swid = MLXSW_PORT_SWID_ALL_SWIDS;
3127 rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003128 break;
3129 case MLXSW_REG_HTGT_TRAP_GROUP_RX:
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003130 swid = 0;
3131 rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003132 break;
3133 case MLXSW_REG_HTGT_TRAP_GROUP_CTRL:
3134 swid = 0;
3135 rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL;
3136 break;
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003137 }
3138 mlxsw_reg_htgt_swid_set(payload, swid);
3139 mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003140 mlxsw_reg_htgt_trap_group_set(payload, group);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003141 mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
3142 mlxsw_reg_htgt_pid_set(payload, 0);
3143 mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
3144 mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
3145 mlxsw_reg_htgt_priority_set(payload, 0);
3146 mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
3147 mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
3148}
3149
3150/* HPKT - Host Packet Trap
3151 * -----------------------
3152 * Configures trap IDs inside trap groups.
3153 */
3154#define MLXSW_REG_HPKT_ID 0x7003
3155#define MLXSW_REG_HPKT_LEN 0x10
3156
3157static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
3158 .id = MLXSW_REG_HPKT_ID,
3159 .len = MLXSW_REG_HPKT_LEN,
3160};
3161
3162enum {
3163 MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
3164 MLXSW_REG_HPKT_ACK_REQUIRED,
3165};
3166
3167/* reg_hpkt_ack
3168 * Require acknowledgements from the host for events.
3169 * If set, then the device will wait for the event it sent to be acknowledged
3170 * by the host. This option is only relevant for event trap IDs.
3171 * Access: RW
3172 *
3173 * Note: Currently not supported by firmware.
3174 */
3175MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
3176
3177enum mlxsw_reg_hpkt_action {
3178 MLXSW_REG_HPKT_ACTION_FORWARD,
3179 MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
3180 MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
3181 MLXSW_REG_HPKT_ACTION_DISCARD,
3182 MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
3183 MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
3184};
3185
3186/* reg_hpkt_action
3187 * Action to perform on packet when trapped.
3188 * 0 - No action. Forward to CPU based on switching rules.
3189 * 1 - Trap to CPU (CPU receives sole copy).
3190 * 2 - Mirror to CPU (CPU receives a replica of the packet).
3191 * 3 - Discard.
3192 * 4 - Soft discard (allow other traps to act on the packet).
3193 * 5 - Trap and soft discard (allow other traps to overwrite this trap).
3194 * Access: RW
3195 *
3196 * Note: Must be set to 0 (forward) for event trap IDs, as they are already
3197 * addressed to the CPU.
3198 */
3199MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
3200
3201/* reg_hpkt_trap_group
3202 * Trap group to associate the trap with.
3203 * Access: RW
3204 */
3205MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
3206
3207/* reg_hpkt_trap_id
3208 * Trap ID.
3209 * Access: Index
3210 *
3211 * Note: A trap ID can only be associated with a single trap group. The device
3212 * will associate the trap ID with the last trap group configured.
3213 */
3214MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
3215
3216enum {
3217 MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
3218 MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
3219 MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
3220};
3221
3222/* reg_hpkt_ctrl
3223 * Configure dedicated buffer resources for control packets.
3224 * 0 - Keep factory defaults.
3225 * 1 - Do not use control buffer for this trap ID.
3226 * 2 - Use control buffer for this trap ID.
3227 * Access: RW
3228 */
3229MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
3230
Ido Schimmelf24af332015-10-15 17:43:27 +02003231static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003232{
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003233 enum mlxsw_reg_htgt_trap_group trap_group;
Ido Schimmelf24af332015-10-15 17:43:27 +02003234
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003235 MLXSW_REG_ZERO(hpkt, payload);
3236 mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
3237 mlxsw_reg_hpkt_action_set(payload, action);
Ido Schimmelf24af332015-10-15 17:43:27 +02003238 switch (trap_id) {
3239 case MLXSW_TRAP_ID_ETHEMAD:
3240 case MLXSW_TRAP_ID_PUDE:
3241 trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD;
3242 break;
3243 default:
3244 trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX;
3245 break;
3246 }
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003247 mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
3248 mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
3249 mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
3250}
3251
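/* Usage sketch (illustrative only): trapping a given trap ID to the CPU
 * (e.g. one of the MLXSW_TRAP_ID_* values from trap.h); the pack helper
 * above chooses the trap group from the trap ID. The helper name is
 * hypothetical; mlxsw_reg_write() and struct mlxsw_core are assumed from
 * core.h.
 */
static inline int mlxsw_reg_hpkt_trap_example(struct mlxsw_core *mlxsw_core,
					      u16 trap_id)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    trap_id);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
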
Ido Schimmel69c407a2016-07-02 11:00:13 +02003252/* RGCR - Router General Configuration Register
3253 * --------------------------------------------
3254 * The register is used for setting up the router configuration.
3255 */
3256#define MLXSW_REG_RGCR_ID 0x8001
3257#define MLXSW_REG_RGCR_LEN 0x28
3258
3259static const struct mlxsw_reg_info mlxsw_reg_rgcr = {
3260 .id = MLXSW_REG_RGCR_ID,
3261 .len = MLXSW_REG_RGCR_LEN,
3262};
3263
3264/* reg_rgcr_ipv4_en
3265 * IPv4 router enable.
3266 * Access: RW
3267 */
3268MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1);
3269
3270/* reg_rgcr_ipv6_en
3271 * IPv6 router enable.
3272 * Access: RW
3273 */
3274MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1);
3275
3276/* reg_rgcr_max_router_interfaces
3277 * Defines the maximum number of active router interfaces for all virtual
3278 * routers.
3279 * Access: RW
3280 */
3281MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16);
3282
3283/* reg_rgcr_usp
3284 * Update switch priority and packet color.
3285 * 0 - Preserve the value of Switch Priority and packet color.
3286 * 1 - Recalculate the value of Switch Priority and packet color.
3287 * Access: RW
3288 *
3289 * Note: Not supported by SwitchX and SwitchX-2.
3290 */
3291MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1);
3292
3293/* reg_rgcr_pcp_rw
3294 * Indicates how to handle the pcp_rewrite_en value:
3295 * 0 - Preserve the value of pcp_rewrite_en.
3296 * 2 - Disable PCP rewrite.
3297 * 3 - Enable PCP rewrite.
3298 * Access: RW
3299 *
3300 * Note: Not supported by SwitchX and SwitchX-2.
3301 */
3302MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2);
3303
3304/* reg_rgcr_activity_dis
3305 * Activity disable:
3306 * 0 - Activity will be set when an entry is hit (default).
3307 * 1 - Activity will not be set when an entry is hit.
3308 *
3309 * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry
3310 * (RALUE).
3311 * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host
3312 * Entry (RAUHT).
3313 * Bits 2:7 are reserved.
3314 * Access: RW
3315 *
3316 * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB.
3317 */
3318MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8);
3319
3320static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en)
3321{
3322 MLXSW_REG_ZERO(rgcr, payload);
3323 mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en);
3324}
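
/* Usage sketch (illustrative only; mlxsw_core and err are assumed to come
 * from the calling driver and mlxsw_reg_write() from the core): enabling
 * the IPv4 router would typically look as follows:
 *
 *	char rgcr_pl[MLXSW_REG_RGCR_LEN];
 *
 *	mlxsw_reg_rgcr_pack(rgcr_pl, true);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(rgcr), rgcr_pl);
 */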
3325
Ido Schimmel3dc26682016-07-02 11:00:18 +02003326/* RITR - Router Interface Table Register
3327 * --------------------------------------
3328 * The register is used to configure the router interface table.
3329 */
3330#define MLXSW_REG_RITR_ID 0x8002
3331#define MLXSW_REG_RITR_LEN 0x40
3332
3333static const struct mlxsw_reg_info mlxsw_reg_ritr = {
3334 .id = MLXSW_REG_RITR_ID,
3335 .len = MLXSW_REG_RITR_LEN,
3336};
3337
3338/* reg_ritr_enable
3339 * Enables routing on the router interface.
3340 * Access: RW
3341 */
3342MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1);
3343
3344/* reg_ritr_ipv4
3345 * IPv4 routing enable. Enables routing of IPv4 traffic on the router
3346 * interface.
3347 * Access: RW
3348 */
3349MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1);
3350
3351/* reg_ritr_ipv6
3352 * IPv6 routing enable. Enables routing of IPv6 traffic on the router
3353 * interface.
3354 * Access: RW
3355 */
3356MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
3357
3358enum mlxsw_reg_ritr_if_type {
3359 MLXSW_REG_RITR_VLAN_IF,
3360 MLXSW_REG_RITR_FID_IF,
3361 MLXSW_REG_RITR_SP_IF,
3362};
3363
3364/* reg_ritr_type
3365 * Router interface type.
3366 * 0 - VLAN interface.
3367 * 1 - FID interface.
3368 * 2 - Sub-port interface.
3369 * Access: RW
3370 */
3371MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3);
3372
3373enum {
3374 MLXSW_REG_RITR_RIF_CREATE,
3375 MLXSW_REG_RITR_RIF_DEL,
3376};
3377
3378/* reg_ritr_op
3379 * Opcode:
3380 * 0 - Create or edit RIF.
3381 * 1 - Delete RIF.
3382 * Reserved for SwitchX-2. For Spectrum, editing of interface properties
3383 * is not supported. An interface must be deleted and re-created in order
3384 * to update properties.
3385 * Access: WO
3386 */
3387MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2);
3388
3389/* reg_ritr_rif
3390 * Router interface index. A pointer to the Router Interface Table.
3391 * Access: Index
3392 */
3393MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16);
3394
3395/* reg_ritr_ipv4_fe
3396 * IPv4 Forwarding Enable.
3397 * Enables routing of IPv4 traffic on the router interface. When disabled,
3398 * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
3399 * Not supported in SwitchX-2.
3400 * Access: RW
3401 */
3402MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
3403
3404/* reg_ritr_ipv6_fe
3405 * IPv6 Forwarding Enable.
3406 * Enables routing of IPv6 traffic on the router interface. When disabled,
3407 * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
3408 * Not supported in SwitchX-2.
3409 * Access: RW
3410 */
3411MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
3412
Ido Schimmela94a6142016-08-17 16:39:33 +02003413/* reg_ritr_lb_en
3414 * Loop-back filter enable for unicast packets.
3415 * If the flag is set then loop-back filter for unicast packets is
3416 * implemented on the RIF. Multicast packets are always subject to
3417 * loop-back filtering.
3418 * Access: RW
3419 */
3420MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
3421
Ido Schimmel3dc26682016-07-02 11:00:18 +02003422/* reg_ritr_virtual_router
3423 * Virtual router ID associated with the router interface.
3424 * Access: RW
3425 */
3426MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16);
3427
3428/* reg_ritr_mtu
3429 * Router interface MTU.
3430 * Access: RW
3431 */
3432MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16);
3433
3434/* reg_ritr_if_swid
3435 * Switch partition ID.
3436 * Access: RW
3437 */
3438MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
3439
3440/* reg_ritr_if_mac
3441 * Router interface MAC address.
3442 * In Spectrum, all MAC addresses must have the same 38 MSBits.
3443 * Access: RW
3444 */
3445MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
3446
3447/* VLAN Interface */
3448
3449/* reg_ritr_vlan_if_vid
3450 * VLAN ID.
3451 * Access: RW
3452 */
3453MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
3454
3455/* FID Interface */
3456
3457/* reg_ritr_fid_if_fid
3458 * Filtering ID. Used to connect a bridge to the router. Only FIDs from
3459 * the vFID range are supported.
3460 * Access: RW
3461 */
3462MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
3463
3464static inline void mlxsw_reg_ritr_fid_set(char *payload,
3465 enum mlxsw_reg_ritr_if_type rif_type,
3466 u16 fid)
3467{
3468 if (rif_type == MLXSW_REG_RITR_FID_IF)
3469 mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
3470 else
3471 mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
3472}
3473
3474/* Sub-port Interface */
3475
3476/* reg_ritr_sp_if_lag
3477 * LAG indication. When this bit is set the system_port field holds the
3478 * LAG identifier.
3479 * Access: RW
3480 */
3481MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
3482
3483/* reg_ritr_sp_system_port
3484 * Port unique indentifier. When lag bit is set, this field holds the
3485 * lag_id in bits 0:9.
3486 * Access: RW
3487 */
3488MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
3489
3490/* reg_ritr_sp_if_vid
3491 * VLAN ID.
3492 * Access: RW
3493 */
3494MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
3495
3496static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
3497{
3498 MLXSW_REG_ZERO(ritr, payload);
3499 mlxsw_reg_ritr_rif_set(payload, rif);
3500}
3501
3502static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
3503 u16 system_port, u16 vid)
3504{
3505 mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
3506 mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
3507 mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
3508}
3509
3510static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
3511 enum mlxsw_reg_ritr_if_type type,
3512 u16 rif, u16 mtu, const char *mac)
3513{
3514 bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
3515
3516 MLXSW_REG_ZERO(ritr, payload);
3517 mlxsw_reg_ritr_enable_set(payload, enable);
3518 mlxsw_reg_ritr_ipv4_set(payload, 1);
3519 mlxsw_reg_ritr_type_set(payload, type);
3520 mlxsw_reg_ritr_op_set(payload, op);
3521 mlxsw_reg_ritr_rif_set(payload, rif);
3522 mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
Ido Schimmela94a6142016-08-17 16:39:33 +02003523 mlxsw_reg_ritr_lb_en_set(payload, 1);
Ido Schimmel3dc26682016-07-02 11:00:18 +02003524 mlxsw_reg_ritr_mtu_set(payload, mtu);
3525 mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
3526}
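
/* Usage sketch (illustrative only; rif, dev_mtu, dev_addr, local_port and
 * vid are caller-supplied, and mlxsw_reg_write() is assumed to be the
 * core's register-write helper): creating a sub-port RIF combines the
 * generic pack with the sub-port specific pack:
 *
 *	char ritr_pl[MLXSW_REG_RITR_LEN];
 *
 *	mlxsw_reg_ritr_pack(ritr_pl, true, MLXSW_REG_RITR_SP_IF, rif,
 *			    dev_mtu, dev_addr);
 *	mlxsw_reg_ritr_sp_if_pack(ritr_pl, false, local_port, vid);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(ritr), ritr_pl);
 */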
3527
Yotam Gigi089f9812016-07-05 11:27:48 +02003528/* RATR - Router Adjacency Table Register
3529 * --------------------------------------
3530 * The RATR register is used to configure the Router Adjacency (next-hop)
3531 * Table.
3532 */
3533#define MLXSW_REG_RATR_ID 0x8008
3534#define MLXSW_REG_RATR_LEN 0x2C
3535
3536static const struct mlxsw_reg_info mlxsw_reg_ratr = {
3537 .id = MLXSW_REG_RATR_ID,
3538 .len = MLXSW_REG_RATR_LEN,
3539};
3540
3541enum mlxsw_reg_ratr_op {
3542 /* Read */
3543 MLXSW_REG_RATR_OP_QUERY_READ = 0,
3544 /* Read and clear activity */
3545 MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2,
3546 /* Write Adjacency entry */
3547 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1,
3548 /* Write Adjacency entry only if the activity is cleared.
3549 * The write may not succeed if the activity is set. There is no
3550 * direct feedback on whether the write has succeeded; however,
3551 * the get will reveal the actual entry (SW can compare the get
3552 * response to the set command).
3553 */
3554 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3,
3555};
3556
3557/* reg_ratr_op
3558 * Note that Write operation may also be used for updating
3559 * counter_set_type and counter_index. In this case all other
3560 * fields must not be updated.
3561 * Access: OP
3562 */
3563MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4);
3564
3565/* reg_ratr_v
3566 * Valid bit. Indicates if the adjacency entry is valid.
3567 * Note: the device may need some time before reusing an invalidated
3568 * entry. During this time the entry cannot be reused. It is
3569 * recommended to use another entry before reusing an invalidated
3570 * entry (e.g. software can put it at the end of the list for
3571 * reusing). Trying to access an invalidated entry not yet cleared
3572 * by the device results in a failure indicating "Try Again" status.
3573 * When valid is '0' then egress_router_interface, trap_action,
3574 * adjacency_parameters and counters are reserved.
3575 * Access: RW
3576 */
3577MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1);
3578
3579/* reg_ratr_a
3580 * Activity. Set for new entries. Set if a packet lookup has hit on
3581 * the specific entry. To clear the a bit, use "clear activity".
3582 * Access: RO
3583 */
3584MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1);
3585
3586/* reg_ratr_adjacency_index_low
3587 * Bits 15:0 of index into the adjacency table.
3588 * For SwitchX and SwitchX-2, the adjacency table is linear and
3589 * used for adjacency entries only.
3590 * For Spectrum, the index is to the KVD linear.
3591 * Access: Index
3592 */
3593MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16);
3594
3595/* reg_ratr_egress_router_interface
3596 * Range is 0 .. cap_max_router_interfaces - 1
3597 * Access: RW
3598 */
3599MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16);
3600
3601enum mlxsw_reg_ratr_trap_action {
3602 MLXSW_REG_RATR_TRAP_ACTION_NOP,
3603 MLXSW_REG_RATR_TRAP_ACTION_TRAP,
3604 MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU,
3605 MLXSW_REG_RATR_TRAP_ACTION_MIRROR,
3606 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS,
3607};
3608
3609/* reg_ratr_trap_action
3610 * see mlxsw_reg_ratr_trap_action
3611 * Access: RW
3612 */
3613MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4);
3614
3615enum mlxsw_reg_ratr_trap_id {
3616 MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0,
3617 MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1,
3618};
3619
3620/* reg_ratr_adjacency_index_high
3621 * Bits 23:16 of the adjacency_index.
3622 * Access: Index
3623 */
3624MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8);
3625
3626/* reg_ratr_trap_id
3627 * Trap ID to be reported to CPU.
3628 * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
3629 * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
3630 * Access: RW
3631 */
3632MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8);
3633
3634/* reg_ratr_eth_destination_mac
3635 * MAC address of the destination next-hop.
3636 * Access: RW
3637 */
3638MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6);
3639
3640static inline void
3641mlxsw_reg_ratr_pack(char *payload,
3642 enum mlxsw_reg_ratr_op op, bool valid,
3643 u32 adjacency_index, u16 egress_rif)
3644{
3645 MLXSW_REG_ZERO(ratr, payload);
3646 mlxsw_reg_ratr_op_set(payload, op);
3647 mlxsw_reg_ratr_v_set(payload, valid);
3648 mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index);
3649 mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16);
3650 mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif);
3651}
3652
3653static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
3654 const char *dest_mac)
3655{
3656 mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
3657}
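
/* Usage sketch (illustrative only; adj_index, rif and neigh_mac are
 * caller-supplied, and mlxsw_reg_write() is assumed to be the core's
 * register-write helper): writing an Ethernet adjacency (next-hop) entry
 * would typically look as follows:
 *
 *	char ratr_pl[MLXSW_REG_RATR_LEN];
 *
 *	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
 *			    true, adj_index, rif);
 *	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_mac);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(ratr), ratr_pl);
 */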
3658
Jiri Pirko6f9fc3c2016-07-04 08:23:05 +02003659/* RALTA - Router Algorithmic LPM Tree Allocation Register
3660 * -------------------------------------------------------
3661 * RALTA is used to allocate the LPM trees of the SHSPM method.
3662 */
3663#define MLXSW_REG_RALTA_ID 0x8010
3664#define MLXSW_REG_RALTA_LEN 0x04
3665
3666static const struct mlxsw_reg_info mlxsw_reg_ralta = {
3667 .id = MLXSW_REG_RALTA_ID,
3668 .len = MLXSW_REG_RALTA_LEN,
3669};
3670
3671/* reg_ralta_op
3672 * opcode (valid for Write, must be 0 on Read)
3673 * 0 - allocate a tree
3674 * 1 - deallocate a tree
3675 * Access: OP
3676 */
3677MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2);
3678
3679enum mlxsw_reg_ralxx_protocol {
3680 MLXSW_REG_RALXX_PROTOCOL_IPV4,
3681 MLXSW_REG_RALXX_PROTOCOL_IPV6,
3682};
3683
3684/* reg_ralta_protocol
3685 * Protocol.
3686 * Deallocation opcode: Reserved.
3687 * Access: RW
3688 */
3689MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4);
3690
3691/* reg_ralta_tree_id
3692 * An identifier (numbered from 1..cap_shspm_max_trees-1) representing
3693 * the tree identifier (managed by software).
3694 * Note that tree_id 0 is allocated for a default-route tree.
3695 * Access: Index
3696 */
3697MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8);
3698
3699static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
3700 enum mlxsw_reg_ralxx_protocol protocol,
3701 u8 tree_id)
3702{
3703 MLXSW_REG_ZERO(ralta, payload);
3704 mlxsw_reg_ralta_op_set(payload, !alloc);
3705 mlxsw_reg_ralta_protocol_set(payload, protocol);
3706 mlxsw_reg_ralta_tree_id_set(payload, tree_id);
3707}
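
/* Usage sketch (illustrative only; tree_id is caller-supplied and
 * mlxsw_reg_write() is assumed to be the core's register-write helper):
 * allocating an IPv4 LPM tree would typically look as follows:
 *
 *	char ralta_pl[MLXSW_REG_RALTA_LEN];
 *
 *	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
 *			     tree_id);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralta), ralta_pl);
 */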
3708
Jiri Pirkoa9823352016-07-04 08:23:06 +02003709/* RALST - Router Algorithmic LPM Structure Tree Register
3710 * ------------------------------------------------------
3711 * RALST is used to set and query the structure of an LPM tree.
3712 * The tree must be structured as a sorted binary tree, where each node
3713 * is a bin that is tagged with the length of the prefixes the lookup
3714 * will refer to. Therefore, bin X refers to a set of entries with prefixes
3715 * of X bits to match with the destination address. The bin 0 indicates
3716 * the default action, when there is no match of any prefix.
3717 */
3718#define MLXSW_REG_RALST_ID 0x8011
3719#define MLXSW_REG_RALST_LEN 0x104
3720
3721static const struct mlxsw_reg_info mlxsw_reg_ralst = {
3722 .id = MLXSW_REG_RALST_ID,
3723 .len = MLXSW_REG_RALST_LEN,
3724};
3725
3726/* reg_ralst_root_bin
3727 * The bin number of the root bin.
3728 * 0<root_bin=<(length of IP address)
3729 * For a default-route tree configure 0xff
3730 * Access: RW
3731 */
3732MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8);
3733
3734/* reg_ralst_tree_id
3735 * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
3736 * Access: Index
3737 */
3738MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8);
3739
3740#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff
3741#define MLXSW_REG_RALST_BIN_OFFSET 0x04
3742#define MLXSW_REG_RALST_BIN_COUNT 128
3743
3744/* reg_ralst_left_child_bin
3745 * Holding the children of the bin according to the stored tree's structure.
3746 * For trees composed of less than 4 blocks, the bins in excess are reserved.
3747 * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
3748 * Access: RW
3749 */
3750MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false);
3751
3752/* reg_ralst_right_child_bin
3753 * Holding the children of the bin according to the stored tree's structure.
3754 * For trees composed of less than 4 blocks, the bins in excess are reserved.
3755 * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
3756 * Access: RW
3757 */
3758MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00,
3759 false);
3760
3761static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id)
3762{
3763 MLXSW_REG_ZERO(ralst, payload);
3764
3765 /* Initialize all bins to have no left or right child */
3766 memset(payload + MLXSW_REG_RALST_BIN_OFFSET,
3767 MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2);
3768
3769 mlxsw_reg_ralst_root_bin_set(payload, root_bin);
3770 mlxsw_reg_ralst_tree_id_set(payload, tree_id);
3771}
3772
3773static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
3774 u8 left_child_bin,
3775 u8 right_child_bin)
3776{
3777 int bin_index = bin_number - 1;
3778
3779 mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin);
3780 mlxsw_reg_ralst_right_child_bin_set(payload, bin_index,
3781 right_child_bin);
3782}
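
/* Usage sketch (illustrative only; root_bin, tree_id, bin and the child
 * bins are caller-supplied, and mlxsw_reg_write() is assumed to be the
 * core's register-write helper): uploading a tree structure is a single
 * pack of the root bin followed by one bin_pack call per bin that has
 * children; how the routing driver orders the bins is not described here:
 *
 *	char ralst_pl[MLXSW_REG_RALST_LEN];
 *
 *	mlxsw_reg_ralst_pack(ralst_pl, root_bin, tree_id);
 *	mlxsw_reg_ralst_bin_pack(ralst_pl, bin, left_child, right_child);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralst), ralst_pl);
 */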
3783
Jiri Pirko20ae4052016-07-04 08:23:07 +02003784/* RALTB - Router Algorithmic LPM Tree Binding Register
3785 * ----------------------------------------------------
3786 * RALTB is used to bind virtual router and protocol to an allocated LPM tree.
3787 */
3788#define MLXSW_REG_RALTB_ID 0x8012
3789#define MLXSW_REG_RALTB_LEN 0x04
3790
3791static const struct mlxsw_reg_info mlxsw_reg_raltb = {
3792 .id = MLXSW_REG_RALTB_ID,
3793 .len = MLXSW_REG_RALTB_LEN,
3794};
3795
3796/* reg_raltb_virtual_router
3797 * Virtual Router ID
3798 * Range is 0..cap_max_virtual_routers-1
3799 * Access: Index
3800 */
3801MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16);
3802
3803/* reg_raltb_protocol
3804 * Protocol.
3805 * Access: Index
3806 */
3807MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4);
3808
3809/* reg_raltb_tree_id
3810 * Tree to be used for the {virtual_router, protocol}
3811 * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
3812 * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0.
3813 * Access: RW
3814 */
3815MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8);
3816
3817static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
3818 enum mlxsw_reg_ralxx_protocol protocol,
3819 u8 tree_id)
3820{
3821 MLXSW_REG_ZERO(raltb, payload);
3822 mlxsw_reg_raltb_virtual_router_set(payload, virtual_router);
3823 mlxsw_reg_raltb_protocol_set(payload, protocol);
3824 mlxsw_reg_raltb_tree_id_set(payload, tree_id);
3825}
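
/* Usage sketch (illustrative only; tree_id is caller-supplied and
 * mlxsw_reg_write() is assumed to be the core's register-write helper):
 * binding virtual router 0 to an already allocated IPv4 tree would
 * typically look as follows:
 *
 *	char raltb_pl[MLXSW_REG_RALTB_LEN];
 *
 *	mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
 *			     tree_id);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(raltb), raltb_pl);
 */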
3826
Jiri Pirkod5a1c742016-07-04 08:23:10 +02003827/* RALUE - Router Algorithmic LPM Unicast Entry Register
3828 * -----------------------------------------------------
3829 * RALUE is used to configure and query LPM entries that serve
3830 * the Unicast protocols.
3831 */
3832#define MLXSW_REG_RALUE_ID 0x8013
3833#define MLXSW_REG_RALUE_LEN 0x38
3834
3835static const struct mlxsw_reg_info mlxsw_reg_ralue = {
3836 .id = MLXSW_REG_RALUE_ID,
3837 .len = MLXSW_REG_RALUE_LEN,
3838};
3839
3840/* reg_ralue_protocol
3841 * Protocol.
3842 * Access: Index
3843 */
3844MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4);
3845
3846enum mlxsw_reg_ralue_op {
3847 /* Read operation. If entry doesn't exist, the operation fails. */
3848 MLXSW_REG_RALUE_OP_QUERY_READ = 0,
3849 /* Clear on read operation. Used to read entry and
3850 * clear Activity bit.
3851 */
3852 MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1,
3853 /* Write operation. Used to write a new entry to the table. All RW
3854 * fields are written for new entry. Activity bit is set
3855 * for new entries.
3856 */
3857 MLXSW_REG_RALUE_OP_WRITE_WRITE = 0,
3858 /* Update operation. Used to update an existing route entry and
3859 * only update the RW fields that are detailed in the field
3860 * op_u_mask. If entry doesn't exist, the operation fails.
3861 */
3862 MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1,
3863 /* Clear activity. The Activity bit (the field a) is cleared
3864 * for the entry.
3865 */
3866 MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2,
3867 /* Delete operation. Used to delete an existing entry. If entry
3868 * doesn't exist, the operation fails.
3869 */
3870 MLXSW_REG_RALUE_OP_WRITE_DELETE = 3,
3871};
3872
3873/* reg_ralue_op
3874 * Operation.
3875 * Access: OP
3876 */
3877MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3);
3878
3879/* reg_ralue_a
3880 * Activity. Set for new entries. Set if a packet lookup has hit on the
3881 * specific entry, only if the entry is a route. To clear the a bit, use
3882 * "clear activity" op.
3883 * Enabled by activity_dis in RGCR
3884 * Access: RO
3885 */
3886MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1);
3887
3888/* reg_ralue_virtual_router
3889 * Virtual Router ID
3890 * Range is 0..cap_max_virtual_routers-1
3891 * Access: Index
3892 */
3893MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16);
3894
3895#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0)
3896#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1)
3897#define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2)
3898
3899/* reg_ralue_op_u_mask
3900 * opcode update mask.
3901 * On read operation, this field is reserved.
3902 * This field is valid for update opcode, otherwise - reserved.
3903 * This field is a bitmask of the fields that should be updated.
3904 * Access: WO
3905 */
3906MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3);
3907
3908/* reg_ralue_prefix_len
3909 * Number of bits in the prefix of the LPM route.
3910 * Note that for IPv6 prefixes, if prefix_len>64 the entry consumes
3911 * two entries in the physical HW table.
3912 * Access: Index
3913 */
3914MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8);
3915
3916/* reg_ralue_dip*
3917 * The prefix of the route or of the marker that the object of the LPM
3918 * is compared with. The most significant bits of the dip are the prefix.
3919 * The least significant bits must be '0' if the prefix_len is smaller
3920 * than 128 for IPv6 or smaller than 32 for IPv4.
3921 * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved.
3922 * Access: Index
3923 */
3924MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32);
3925
3926enum mlxsw_reg_ralue_entry_type {
3927 MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1,
3928 MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2,
3929 MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3,
3930};
3931
3932/* reg_ralue_entry_type
3933 * Entry type.
3934 * Note - for Marker entries, the action_type and action fields are reserved.
3935 * Access: RW
3936 */
3937MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2);
3938
3939/* reg_ralue_bmp_len
3940 * The best match prefix length in the case that there is no match for
3941 * longer prefixes.
3942 * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
3943 * Note for any update operation with entry_type modification this
3944 * field must be set.
3945 * Access: RW
3946 */
3947MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8);
3948
3949enum mlxsw_reg_ralue_action_type {
3950 MLXSW_REG_RALUE_ACTION_TYPE_REMOTE,
3951 MLXSW_REG_RALUE_ACTION_TYPE_LOCAL,
3952 MLXSW_REG_RALUE_ACTION_TYPE_IP2ME,
3953};
3954
3955/* reg_ralue_action_type
3956 * Action Type
3957 * Indicates how the IP address is connected.
3958 * It can be connected to a local subnet through local_erif or can be
3959 * on a remote subnet connected through a next-hop router,
3960 * or transmitted to the CPU.
3961 * Reserved when entry_type = MARKER_ENTRY
3962 * Access: RW
3963 */
3964MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2);
3965
3966enum mlxsw_reg_ralue_trap_action {
3967 MLXSW_REG_RALUE_TRAP_ACTION_NOP,
3968 MLXSW_REG_RALUE_TRAP_ACTION_TRAP,
3969 MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU,
3970 MLXSW_REG_RALUE_TRAP_ACTION_MIRROR,
3971 MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR,
3972};
3973
3974/* reg_ralue_trap_action
3975 * Trap action.
3976 * For IP2ME action, only NOP and MIRROR are possible.
3977 * Access: RW
3978 */
3979MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4);
3980
3981/* reg_ralue_trap_id
3982 * Trap ID to be reported to CPU.
3983 * Trap ID is RTR_INGRESS0 or RTR_INGRESS1.
3984 * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
3985 * Access: RW
3986 */
3987MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9);
3988
3989/* reg_ralue_adjacency_index
3990 * Points to the first entry of the group-based ECMP.
3991 * Only relevant in case of REMOTE action.
3992 * Access: RW
3993 */
3994MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24);
3995
3996/* reg_ralue_ecmp_size
3997 * Number of sequential entries, starting
3998 * from the adjacency_index (the number of ECMPs).
3999 * The valid range is 1-64, 512, 1024, 2048 and 4096.
4000 * Reserved when trap_action is TRAP or DISCARD_ERROR.
4001 * Only relevant in case of REMOTE action.
4002 * Access: RW
4003 */
4004MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13);
4005
4006/* reg_ralue_local_erif
4007 * Egress Router Interface.
4008 * Only relevant in case of LOCAL action.
4009 * Access: RW
4010 */
4011MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16);
4012
4013/* reg_ralue_v
4014 * Valid bit for the tunnel_ptr field.
4015 * If valid = 0 then trap to CPU as IP2ME trap ID.
4016 * If valid = 1 and the packet format allows NVE or IPinIP tunnel
4017 * decapsulation then tunnel decapsulation is done.
4018 * If valid = 1 and packet format does not allow NVE or IPinIP tunnel
4019 * decapsulation then trap as IP2ME trap ID.
4020 * Only relevant in case of IP2ME action.
4021 * Access: RW
4022 */
4023MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1);
4024
4025/* reg_ralue_tunnel_ptr
4026 * Tunnel Pointer for NVE or IPinIP tunnel decapsulation.
4027 * For Spectrum, pointer to KVD Linear.
4028 * Only relevant in case of IP2ME action.
4029 * Access: RW
4030 */
4031MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24);
4032
4033static inline void mlxsw_reg_ralue_pack(char *payload,
4034 enum mlxsw_reg_ralxx_protocol protocol,
4035 enum mlxsw_reg_ralue_op op,
4036 u16 virtual_router, u8 prefix_len)
4037{
4038 MLXSW_REG_ZERO(ralue, payload);
4039 mlxsw_reg_ralue_protocol_set(payload, protocol);
Jiri Pirko0e7df1a2016-08-17 16:39:34 +02004040 mlxsw_reg_ralue_op_set(payload, op);
Jiri Pirkod5a1c742016-07-04 08:23:10 +02004041 mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
4042 mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
4043 mlxsw_reg_ralue_entry_type_set(payload,
4044 MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
4045 mlxsw_reg_ralue_bmp_len_set(payload, prefix_len);
4046}
4047
4048static inline void mlxsw_reg_ralue_pack4(char *payload,
4049 enum mlxsw_reg_ralxx_protocol protocol,
4050 enum mlxsw_reg_ralue_op op,
4051 u16 virtual_router, u8 prefix_len,
4052 u32 dip)
4053{
4054 mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
4055 mlxsw_reg_ralue_dip4_set(payload, dip);
4056}
4057
4058static inline void
4059mlxsw_reg_ralue_act_remote_pack(char *payload,
4060 enum mlxsw_reg_ralue_trap_action trap_action,
4061 u16 trap_id, u32 adjacency_index, u16 ecmp_size)
4062{
4063 mlxsw_reg_ralue_action_type_set(payload,
4064 MLXSW_REG_RALUE_ACTION_TYPE_REMOTE);
4065 mlxsw_reg_ralue_trap_action_set(payload, trap_action);
4066 mlxsw_reg_ralue_trap_id_set(payload, trap_id);
4067 mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index);
4068 mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size);
4069}
4070
4071static inline void
4072mlxsw_reg_ralue_act_local_pack(char *payload,
4073 enum mlxsw_reg_ralue_trap_action trap_action,
4074 u16 trap_id, u16 local_erif)
4075{
4076 mlxsw_reg_ralue_action_type_set(payload,
4077 MLXSW_REG_RALUE_ACTION_TYPE_LOCAL);
4078 mlxsw_reg_ralue_trap_action_set(payload, trap_action);
4079 mlxsw_reg_ralue_trap_id_set(payload, trap_id);
4080 mlxsw_reg_ralue_local_erif_set(payload, local_erif);
4081}
4082
4083static inline void
4084mlxsw_reg_ralue_act_ip2me_pack(char *payload)
4085{
4086 mlxsw_reg_ralue_action_type_set(payload,
4087 MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
4088}
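
/* Usage sketch (illustrative only; vr_id, prefix_len, dip and rif are
 * caller-supplied, and mlxsw_reg_write() is assumed to be the core's
 * register-write helper): writing an IPv4 route that points at a directly
 * connected (local) egress RIF would typically look as follows:
 *
 *	char ralue_pl[MLXSW_REG_RALUE_LEN];
 *
 *	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
 *			      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr_id,
 *			      prefix_len, dip);
 *	mlxsw_reg_ralue_act_local_pack(ralue_pl,
 *				       MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
 *				       rif);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralue), ralue_pl);
 */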
4089
Yotam Gigi4457b3df2016-07-05 11:27:40 +02004090/* RAUHT - Router Algorithmic LPM Unicast Host Table Register
4091 * ----------------------------------------------------------
4092 * The RAUHT register is used to configure and query the Unicast Host table in
4093 * devices that implement the Algorithmic LPM.
4094 */
4095#define MLXSW_REG_RAUHT_ID 0x8014
4096#define MLXSW_REG_RAUHT_LEN 0x74
4097
4098static const struct mlxsw_reg_info mlxsw_reg_rauht = {
4099 .id = MLXSW_REG_RAUHT_ID,
4100 .len = MLXSW_REG_RAUHT_LEN,
4101};
4102
4103enum mlxsw_reg_rauht_type {
4104 MLXSW_REG_RAUHT_TYPE_IPV4,
4105 MLXSW_REG_RAUHT_TYPE_IPV6,
4106};
4107
4108/* reg_rauht_type
4109 * Access: Index
4110 */
4111MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2);
4112
4113enum mlxsw_reg_rauht_op {
4114 MLXSW_REG_RAUHT_OP_QUERY_READ = 0,
4115 /* Read operation */
4116 MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1,
4117 /* Clear on read operation. Used to read entry and clear
4118 * activity bit.
4119 */
4120 MLXSW_REG_RAUHT_OP_WRITE_ADD = 0,
4121 /* Add. Used to write a new entry to the table. All R/W fields are
4122 * relevant for new entry. Activity bit is set for new entries.
4123 */
4124 MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1,
4125 /* Update action. Used to update an existing route entry and
4126 * only update the following fields:
4127 * trap_action, trap_id, mac, counter_set_type, counter_index
4128 */
4129 MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2,
4130 /* Clear activity. A bit is cleared for the entry. */
4131 MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3,
4132 /* Delete entry */
4133 MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4,
4134 /* Delete all host entries on a RIF. In this command, dip
4135 * field is reserved.
4136 */
4137};
4138
4139/* reg_rauht_op
4140 * Access: OP
4141 */
4142MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3);
4143
4144/* reg_rauht_a
4145 * Activity. Set for new entries. Set if a packet lookup has hit on
4146 * the specific entry.
4147 * To clear the a bit, use "clear activity" op.
4148 * Controlled by the activity_dis field in RGCR.
4149 * Access: RO
4150 */
4151MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1);
4152
4153/* reg_rauht_rif
4154 * Router Interface
4155 * Access: Index
4156 */
4157MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16);
4158
4159/* reg_rauht_dip*
4160 * Destination address.
4161 * Access: Index
4162 */
4163MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32);
4164
4165enum mlxsw_reg_rauht_trap_action {
4166 MLXSW_REG_RAUHT_TRAP_ACTION_NOP,
4167 MLXSW_REG_RAUHT_TRAP_ACTION_TRAP,
4168 MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU,
4169 MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR,
4170 MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS,
4171};
4172
4173/* reg_rauht_trap_action
4174 * Access: RW
4175 */
4176MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4);
4177
4178enum mlxsw_reg_rauht_trap_id {
4179 MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0,
4180 MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1,
4181};
4182
4183/* reg_rauht_trap_id
4184 * Trap ID to be reported to CPU.
4185 * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
4186 * For trap_action of NOP, MIRROR and DISCARD_ERROR,
4187 * trap_id is reserved.
4188 * Access: RW
4189 */
4190MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9);
4191
4192/* reg_rauht_counter_set_type
4193 * Counter set type for flow counters
4194 * Access: RW
4195 */
4196MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8);
4197
4198/* reg_rauht_counter_index
4199 * Counter index for flow counters
4200 * Access: RW
4201 */
4202MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24);
4203
4204/* reg_rauht_mac
4205 * MAC address.
4206 * Access: RW
4207 */
4208MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6);
4209
4210static inline void mlxsw_reg_rauht_pack(char *payload,
4211 enum mlxsw_reg_rauht_op op, u16 rif,
4212 const char *mac)
4213{
4214 MLXSW_REG_ZERO(rauht, payload);
4215 mlxsw_reg_rauht_op_set(payload, op);
4216 mlxsw_reg_rauht_rif_set(payload, rif);
4217 mlxsw_reg_rauht_mac_memcpy_to(payload, mac);
4218}
4219
4220static inline void mlxsw_reg_rauht_pack4(char *payload,
4221 enum mlxsw_reg_rauht_op op, u16 rif,
4222 const char *mac, u32 dip)
4223{
4224 mlxsw_reg_rauht_pack(payload, op, rif, mac);
4225 mlxsw_reg_rauht_dip4_set(payload, dip);
4226}
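
/* Usage sketch (illustrative only; rif, neigh_mac and dip are
 * caller-supplied, and mlxsw_reg_write() is assumed to be the core's
 * register-write helper): adding an IPv4 neighbour (host) entry would
 * typically look as follows:
 *
 *	char rauht_pl[MLXSW_REG_RAUHT_LEN];
 *
 *	mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
 *			      rif, neigh_mac, dip);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(rauht), rauht_pl);
 */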
4227
Jiri Pirkoa59f0b32016-07-05 11:27:49 +02004228/* RALEU - Router Algorithmic LPM ECMP Update Register
4229 * ---------------------------------------------------
4230 * The register enables updating the ECMP section in the action for multiple
4231 * LPM Unicast entries in a single operation. The update is executed to
4232 * all entries of a {virtual router, protocol} tuple using the same ECMP group.
4233 */
4234#define MLXSW_REG_RALEU_ID 0x8015
4235#define MLXSW_REG_RALEU_LEN 0x28
4236
4237static const struct mlxsw_reg_info mlxsw_reg_raleu = {
4238 .id = MLXSW_REG_RALEU_ID,
4239 .len = MLXSW_REG_RALEU_LEN,
4240};
4241
4242/* reg_raleu_protocol
4243 * Protocol.
4244 * Access: Index
4245 */
4246MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4);
4247
4248/* reg_raleu_virtual_router
4249 * Virtual Router ID
4250 * Range is 0..cap_max_virtual_routers-1
4251 * Access: Index
4252 */
4253MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16);
4254
4255/* reg_raleu_adjacency_index
4256 * Adjacency Index used for matching on the existing entries.
4257 * Access: Index
4258 */
4259MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24);
4260
4261/* reg_raleu_ecmp_size
4262 * ECMP Size used for matching on the existing entries.
4263 * Access: Index
4264 */
4265MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13);
4266
4267/* reg_raleu_new_adjacency_index
4268 * New Adjacency Index.
4269 * Access: WO
4270 */
4271MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24);
4272
4273/* reg_raleu_new_ecmp_size
4274 * New ECMP Size.
4275 * Access: WO
4276 */
4277MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13);
4278
4279static inline void mlxsw_reg_raleu_pack(char *payload,
4280 enum mlxsw_reg_ralxx_protocol protocol,
4281 u16 virtual_router,
4282 u32 adjacency_index, u16 ecmp_size,
4283 u32 new_adjacency_index,
4284 u16 new_ecmp_size)
4285{
4286 MLXSW_REG_ZERO(raleu, payload);
4287 mlxsw_reg_raleu_protocol_set(payload, protocol);
4288 mlxsw_reg_raleu_virtual_router_set(payload, virtual_router);
4289 mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index);
4290 mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size);
4291 mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index);
4292 mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size);
4293}
4294
Yotam Gigi7cf2c202016-07-05 11:27:41 +02004295/* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register
4296 * ----------------------------------------------------------------
4297 * The RAUHTD register allows dumping entries from the Router Unicast Host
4298 * Table. For a given session an entry is dumped no more than one time. The
4299 * first RAUHTD access after reset is a new session. A session ends when the
4300 * num_rec response is smaller than num_rec request or for IPv4 when the
4301 * num_entries is smaller than 4. The clear activity affects the current session
4302 * or the last session if a new session has not started.
4303 */
4304#define MLXSW_REG_RAUHTD_ID 0x8018
4305#define MLXSW_REG_RAUHTD_BASE_LEN 0x20
4306#define MLXSW_REG_RAUHTD_REC_LEN 0x20
4307#define MLXSW_REG_RAUHTD_REC_MAX_NUM 32
4308#define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \
4309 MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN)
4310#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4
4311
4312static const struct mlxsw_reg_info mlxsw_reg_rauhtd = {
4313 .id = MLXSW_REG_RAUHTD_ID,
4314 .len = MLXSW_REG_RAUHTD_LEN,
4315};
4316
4317#define MLXSW_REG_RAUHTD_FILTER_A BIT(0)
4318#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3)
4319
4320/* reg_rauhtd_filter_fields
4321 * if a bit is '0' then the relevant field is ignored and dump is done
4322 * regardless of the field value
4323 * Bit0 - filter by activity: entry_a
4324 * Bit3 - filter by entry rif: entry_rif
4325 * Access: Index
4326 */
4327MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8);
4328
4329enum mlxsw_reg_rauhtd_op {
4330 MLXSW_REG_RAUHTD_OP_DUMP,
4331 MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR,
4332};
4333
4334/* reg_rauhtd_op
4335 * Access: OP
4336 */
4337MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2);
4338
4339/* reg_rauhtd_num_rec
4340 * At request: number of records requested
4341 * At response: number of records dumped
4342 * For IPv4, each record has 4 entries at request and up to 4 entries
4343 * at response
4344 * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM
4345 * Access: Index
4346 */
4347MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8);
4348
4349/* reg_rauhtd_entry_a
4350 * Dump only if activity has value of entry_a
4351 * Reserved if filter_fields bit0 is '0'
4352 * Access: Index
4353 */
4354MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1);
4355
4356enum mlxsw_reg_rauhtd_type {
4357 MLXSW_REG_RAUHTD_TYPE_IPV4,
4358 MLXSW_REG_RAUHTD_TYPE_IPV6,
4359};
4360
4361/* reg_rauhtd_type
4362 * Dump only if record type is:
4363 * 0 - IPv4
4364 * 1 - IPv6
4365 * Access: Index
4366 */
4367MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4);
4368
4369/* reg_rauhtd_entry_rif
4370 * Dump only if RIF has value of entry_rif
4371 * Reserved if filter_fields bit3 is '0'
4372 * Access: Index
4373 */
4374MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16);
4375
4376static inline void mlxsw_reg_rauhtd_pack(char *payload,
4377 enum mlxsw_reg_rauhtd_type type)
4378{
4379 MLXSW_REG_ZERO(rauhtd, payload);
4380 mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A);
4381 mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR);
4382 mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM);
4383 mlxsw_reg_rauhtd_entry_a_set(payload, 1);
4384 mlxsw_reg_rauhtd_type_set(payload, type);
4385}
4386
4387/* reg_rauhtd_ipv4_rec_num_entries
4388 * Number of valid entries in this record:
4389 * 0 - 1 valid entry
4390 * 1 - 2 valid entries
4391 * 2 - 3 valid entries
4392 * 3 - 4 valid entries
4393 * Access: RO
4394 */
4395MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries,
4396 MLXSW_REG_RAUHTD_BASE_LEN, 28, 2,
4397 MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
4398
4399/* reg_rauhtd_rec_type
4400 * Record type.
4401 * 0 - IPv4
4402 * 1 - IPv6
4403 * Access: RO
4404 */
4405MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2,
4406 MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
4407
4408#define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8
4409
4410/* reg_rauhtd_ipv4_ent_a
4411 * Activity. Set for new entries. Set if a packet lookup has hit on the
4412 * specific entry.
4413 * Access: RO
4414 */
4415MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1,
4416 MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
4417
4418/* reg_rauhtd_ipv4_ent_rif
4419 * Router interface.
4420 * Access: RO
4421 */
4422MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0,
4423 16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
4424
4425/* reg_rauhtd_ipv4_ent_dip
4426 * Destination IPv4 address.
4427 * Access: RO
4428 */
4429MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0,
4430 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false);
4431
4432static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload,
4433 int ent_index, u16 *p_rif,
4434 u32 *p_dip)
4435{
4436 *p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index);
4437 *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index);
4438}
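
/* Usage sketch (illustrative only; mlxsw_reg_query() is assumed to be the
 * core's register-query helper, and error/NULL handling is omitted). The
 * payload is over 1KB, so it is allocated dynamically rather than on the
 * stack. Per the description of ipv4_rec_num_entries below, record i
 * carries num_entries + 1 valid entries, and entry j of record i is
 * unpacked with entry index i * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + j:
 *
 *	char *rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
 *	int num_rec;
 *	u32 dip;
 *	u16 rif;
 *
 *	mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(rauhtd), rauhtd_pl);
 *	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
 *	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, 0, &rif, &dip);
 */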
4439
Jiri Pirko5246f2e2015-11-27 13:45:58 +01004440/* MFCR - Management Fan Control Register
4441 * --------------------------------------
4442 * This register controls the settings of the Fan Speed PWM mechanism.
4443 */
4444#define MLXSW_REG_MFCR_ID 0x9001
4445#define MLXSW_REG_MFCR_LEN 0x08
4446
4447static const struct mlxsw_reg_info mlxsw_reg_mfcr = {
4448 .id = MLXSW_REG_MFCR_ID,
4449 .len = MLXSW_REG_MFCR_LEN,
4450};
4451
4452enum mlxsw_reg_mfcr_pwm_frequency {
4453 MLXSW_REG_MFCR_PWM_FEQ_11HZ = 0x00,
4454 MLXSW_REG_MFCR_PWM_FEQ_14_7HZ = 0x01,
4455 MLXSW_REG_MFCR_PWM_FEQ_22_1HZ = 0x02,
4456 MLXSW_REG_MFCR_PWM_FEQ_1_4KHZ = 0x40,
4457 MLXSW_REG_MFCR_PWM_FEQ_5KHZ = 0x41,
4458 MLXSW_REG_MFCR_PWM_FEQ_20KHZ = 0x42,
4459 MLXSW_REG_MFCR_PWM_FEQ_22_5KHZ = 0x43,
4460 MLXSW_REG_MFCR_PWM_FEQ_25KHZ = 0x44,
4461};
4462
4463/* reg_mfcr_pwm_frequency
4464 * Controls the frequency of the PWM signal.
4465 * Access: RW
4466 */
4467MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 6);
4468
4469#define MLXSW_MFCR_TACHOS_MAX 10
4470
4471/* reg_mfcr_tacho_active
4472 * Indicates which of the tachometers are active (bit per tachometer).
4473 * Access: RO
4474 */
4475MLXSW_ITEM32(reg, mfcr, tacho_active, 0x04, 16, MLXSW_MFCR_TACHOS_MAX);
4476
4477#define MLXSW_MFCR_PWMS_MAX 5
4478
4479/* reg_mfcr_pwm_active
4480 * Indicates which of the PWM controls are active (bit per PWM).
4481 * Access: RO
4482 */
4483MLXSW_ITEM32(reg, mfcr, pwm_active, 0x04, 0, MLXSW_MFCR_PWMS_MAX);
4484
4485static inline void
4486mlxsw_reg_mfcr_pack(char *payload,
4487 enum mlxsw_reg_mfcr_pwm_frequency pwm_frequency)
4488{
4489 MLXSW_REG_ZERO(mfcr, payload);
4490 mlxsw_reg_mfcr_pwm_frequency_set(payload, pwm_frequency);
4491}
4492
4493static inline void
4494mlxsw_reg_mfcr_unpack(char *payload,
4495 enum mlxsw_reg_mfcr_pwm_frequency *p_pwm_frequency,
4496 u16 *p_tacho_active, u8 *p_pwm_active)
4497{
4498 *p_pwm_frequency = mlxsw_reg_mfcr_pwm_frequency_get(payload);
4499 *p_tacho_active = mlxsw_reg_mfcr_tacho_active_get(payload);
4500 *p_pwm_active = mlxsw_reg_mfcr_pwm_active_get(payload);
4501}
4502
4503/* MFSC - Management Fan Speed Control Register
4504 * --------------------------------------------
4505 * This register controls the settings of the Fan Speed PWM mechanism.
4506 */
4507#define MLXSW_REG_MFSC_ID 0x9002
4508#define MLXSW_REG_MFSC_LEN 0x08
4509
4510static const struct mlxsw_reg_info mlxsw_reg_mfsc = {
4511 .id = MLXSW_REG_MFSC_ID,
4512 .len = MLXSW_REG_MFSC_LEN,
4513};
4514
4515/* reg_mfsc_pwm
4516 * Fan pwm to control / monitor.
4517 * Access: Index
4518 */
4519MLXSW_ITEM32(reg, mfsc, pwm, 0x00, 24, 3);
4520
4521/* reg_mfsc_pwm_duty_cycle
4522 * Controls the duty cycle of the PWM. Values range from 0..255,
4523 * representing a duty cycle of 0%..100%.
4524 * Access: RW
4525 */
4526MLXSW_ITEM32(reg, mfsc, pwm_duty_cycle, 0x04, 0, 8);
4527
4528static inline void mlxsw_reg_mfsc_pack(char *payload, u8 pwm,
4529 u8 pwm_duty_cycle)
4530{
4531 MLXSW_REG_ZERO(mfsc, payload);
4532 mlxsw_reg_mfsc_pwm_set(payload, pwm);
4533 mlxsw_reg_mfsc_pwm_duty_cycle_set(payload, pwm_duty_cycle);
4534}
4535
4536/* MFSM - Management Fan Speed Measurement
4537 * ---------------------------------------
4538 * This register controls the settings of the Tacho measurements and
4539 * enables reading the Tachometer measurements.
4540 */
4541#define MLXSW_REG_MFSM_ID 0x9003
4542#define MLXSW_REG_MFSM_LEN 0x08
4543
4544static const struct mlxsw_reg_info mlxsw_reg_mfsm = {
4545 .id = MLXSW_REG_MFSM_ID,
4546 .len = MLXSW_REG_MFSM_LEN,
4547};
4548
4549/* reg_mfsm_tacho
4550 * Fan tachometer index.
4551 * Access: Index
4552 */
4553MLXSW_ITEM32(reg, mfsm, tacho, 0x00, 24, 4);
4554
4555/* reg_mfsm_rpm
4556 * Fan speed (round per minute).
4557 * Access: RO
4558 */
4559MLXSW_ITEM32(reg, mfsm, rpm, 0x04, 0, 16);
4560
4561static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho)
4562{
4563 MLXSW_REG_ZERO(mfsm, payload);
4564 mlxsw_reg_mfsm_tacho_set(payload, tacho);
4565}
4566
Jiri Pirko85926f82015-11-27 13:45:56 +01004567/* MTCAP - Management Temperature Capabilities
4568 * -------------------------------------------
4569 * This register exposes the capabilities of the device and
4570 * system temperature sensing.
4571 */
4572#define MLXSW_REG_MTCAP_ID 0x9009
4573#define MLXSW_REG_MTCAP_LEN 0x08
4574
4575static const struct mlxsw_reg_info mlxsw_reg_mtcap = {
4576 .id = MLXSW_REG_MTCAP_ID,
4577 .len = MLXSW_REG_MTCAP_LEN,
4578};
4579
4580/* reg_mtcap_sensor_count
4581 * Number of sensors supported by the device.
4582 * This includes the QSFP module sensors (if they exist in the QSFP module).
4583 * Access: RO
4584 */
4585MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
4586
4587/* MTMP - Management Temperature
4588 * -----------------------------
4589 * This register controls the settings of the temperature measurements
4590 * and enables reading the temperature measurements. Note that temperature
4591 * is in 0.125 degrees Celsius.
4592 */
4593#define MLXSW_REG_MTMP_ID 0x900A
4594#define MLXSW_REG_MTMP_LEN 0x20
4595
4596static const struct mlxsw_reg_info mlxsw_reg_mtmp = {
4597 .id = MLXSW_REG_MTMP_ID,
4598 .len = MLXSW_REG_MTMP_LEN,
4599};
4600
4601/* reg_mtmp_sensor_index
4602 * Sensor index to access.
4603 * 64-127 of sensor_index are mapped to the SFP+/QSFP modules sequentially
4604 * (module 0 is mapped to sensor_index 64).
4605 * Access: Index
4606 */
4607MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 7);
4608
4609/* Convert to milli degrees Celsius */
4610#define MLXSW_REG_MTMP_TEMP_TO_MC(val) ((val) * 125)
4611
4612/* reg_mtmp_temperature
4613 * Temperature reading from the sensor. Reading is in 0.125 Celsius
4614 * degrees units.
4615 * Access: RO
4616 */
4617MLXSW_ITEM32(reg, mtmp, temperature, 0x04, 0, 16);
4618
4619/* reg_mtmp_mte
4620 * Max Temperature Enable - enables measuring the max temperature on a sensor.
4621 * Access: RW
4622 */
4623MLXSW_ITEM32(reg, mtmp, mte, 0x08, 31, 1);
4624
4625/* reg_mtmp_mtr
4626 * Max Temperature Reset - clears the value of the max temperature register.
4627 * Access: WO
4628 */
4629MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1);
4630
4631/* reg_mtmp_max_temperature
4632 * The highest measured temperature from the sensor.
4633 * When the bit mte is cleared, the field max_temperature is reserved.
4634 * Access: RO
4635 */
4636MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16);
4637
4638#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8
4639
4640/* reg_mtmp_sensor_name
4641 * Sensor Name
4642 * Access: RO
4643 */
4644MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE);
4645
4646static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index,
4647 bool max_temp_enable,
4648 bool max_temp_reset)
4649{
4650 MLXSW_REG_ZERO(mtmp, payload);
4651 mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
4652 mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
4653 mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
4654}
4655
4656static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
4657 unsigned int *p_max_temp,
4658 char *sensor_name)
4659{
4660 u16 temp;
4661
4662 if (p_temp) {
4663 temp = mlxsw_reg_mtmp_temperature_get(payload);
4664 *p_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
4665 }
4666 if (p_max_temp) {
Jiri Pirkoacf35a42015-12-11 16:10:39 +01004667 temp = mlxsw_reg_mtmp_max_temperature_get(payload);
Jiri Pirko85926f82015-11-27 13:45:56 +01004668 *p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
4669 }
4670 if (sensor_name)
4671 mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
4672}
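
/* Usage sketch (illustrative only; sensor_index is caller-supplied and
 * mlxsw_reg_query() is assumed to be the core's register-query helper):
 * reading a temperature sensor is a pack followed by a query and unpack;
 * the unpacked values are in milli degrees Celsius:
 *
 *	char mtmp_pl[MLXSW_REG_MTMP_LEN];
 *	char name[MLXSW_REG_MTMP_SENSOR_NAME_SIZE];
 *	unsigned int temp, max_temp;
 *
 *	mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, false);
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
 *	mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, &max_temp, name);
 */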
4673
Yotam Gigi43a46852016-07-21 12:03:14 +02004674/* MPAT - Monitoring Port Analyzer Table
4675 * -------------------------------------
4676 * MPAT Register is used to query and configure the Switch PortAnalyzer Table.
4677 * For an enabled analyzer, all fields except e (enable) cannot be modified.
4678 */
4679#define MLXSW_REG_MPAT_ID 0x901A
4680#define MLXSW_REG_MPAT_LEN 0x78
4681
4682static const struct mlxsw_reg_info mlxsw_reg_mpat = {
4683 .id = MLXSW_REG_MPAT_ID,
4684 .len = MLXSW_REG_MPAT_LEN,
4685};
4686
4687/* reg_mpat_pa_id
4688 * Port Analyzer ID.
4689 * Access: Index
4690 */
4691MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4);
4692
4693/* reg_mpat_system_port
4694 * A unique port identifier for the final destination of the packet.
4695 * Access: RW
4696 */
4697MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16);
4698
4699/* reg_mpat_e
4700 * Enable. Indicating the Port Analyzer is enabled.
4701 * Access: RW
4702 */
4703MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1);
4704
4705/* reg_mpat_qos
4706 * Quality Of Service Mode.
4707 * 0: CONFIGURED - QoS parameters (Switch Priority, and encapsulation
4708 * PCP, DEI, DSCP or VL) are configured.
4709 * 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the
4710 * same as in the original packet that has triggered the mirroring. For
4711 * SPAN also the pcp,dei are maintained.
4712 * Access: RW
4713 */
4714MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
4715
Yotam Gigi23019052016-07-21 12:03:15 +02004716/* reg_mpat_be
4717 * Best effort mode. Indicates that mirroring traffic should not cause
4718 * packet drop or back pressure, but may discard the mirrored packets
4719 * instead. Mirrored packets will be forwarded in a best effort manner.
4720 * 0: Do not discard mirrored packets
4721 * 1: Discard mirrored packets if causing congestion
4722 * Access: RW
4723 */
4724MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
4725
Yotam Gigi43a46852016-07-21 12:03:14 +02004726static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
4727 u16 system_port, bool e)
4728{
4729 MLXSW_REG_ZERO(mpat, payload);
4730 mlxsw_reg_mpat_pa_id_set(payload, pa_id);
4731 mlxsw_reg_mpat_system_port_set(payload, system_port);
4732 mlxsw_reg_mpat_e_set(payload, e);
4733 mlxsw_reg_mpat_qos_set(payload, 1);
Yotam Gigi23019052016-07-21 12:03:15 +02004734 mlxsw_reg_mpat_be_set(payload, 1);
4735}
4736
4737/* MPAR - Monitoring Port Analyzer Register
4738 * ----------------------------------------
4739 * MPAR register is used to query and configure the port analyzer port mirroring
4740 * properties.
4741 */
4742#define MLXSW_REG_MPAR_ID 0x901B
4743#define MLXSW_REG_MPAR_LEN 0x08
4744
4745static const struct mlxsw_reg_info mlxsw_reg_mpar = {
4746 .id = MLXSW_REG_MPAR_ID,
4747 .len = MLXSW_REG_MPAR_LEN,
4748};
4749
4750/* reg_mpar_local_port
4751 * The local port to mirror the packets from.
4752 * Access: Index
4753 */
4754MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8);
4755
4756enum mlxsw_reg_mpar_i_e {
4757 MLXSW_REG_MPAR_TYPE_EGRESS,
4758 MLXSW_REG_MPAR_TYPE_INGRESS,
4759};
4760
4761/* reg_mpar_i_e
4762 * Ingress/Egress
4763 * Access: Index
4764 */
4765MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4);
4766
4767/* reg_mpar_enable
4768 * Enable mirroring
4769 * By default, port mirroring is disabled for all ports.
4770 * Access: RW
4771 */
4772MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1);
4773
4774/* reg_mpar_pa_id
4775 * Port Analyzer ID.
4776 * Access: RW
4777 */
4778MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
4779
4780static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
4781 enum mlxsw_reg_mpar_i_e i_e,
4782 bool enable, u8 pa_id)
4783{
4784 MLXSW_REG_ZERO(mpar, payload);
4785 mlxsw_reg_mpar_local_port_set(payload, local_port);
4786 mlxsw_reg_mpar_enable_set(payload, enable);
4787 mlxsw_reg_mpar_i_e_set(payload, i_e);
4788 mlxsw_reg_mpar_pa_id_set(payload, pa_id);
Yotam Gigi43a46852016-07-21 12:03:14 +02004789}
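
/* Usage sketch (illustrative only; pa_id, to_local_port and
 * from_local_port are caller-supplied, and mlxsw_reg_write() is assumed
 * to be the core's register-write helper): mirroring the ingress traffic
 * of one local port to an analyzer port is a two step configuration -
 * MPAT defines the analyzer, MPAR binds the mirrored port to it:
 *
 *	char mpat_pl[MLXSW_REG_MPAT_LEN];
 *	char mpar_pl[MLXSW_REG_MPAR_LEN];
 *
 *	mlxsw_reg_mpat_pack(mpat_pl, pa_id, to_local_port, true);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mpat), mpat_pl);
 *	mlxsw_reg_mpar_pack(mpar_pl, from_local_port,
 *			    MLXSW_REG_MPAR_TYPE_INGRESS, true, pa_id);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mpar), mpar_pl);
 */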
4790
Ido Schimmel3161c152015-11-27 13:45:54 +01004791/* MLCR - Management LED Control Register
4792 * --------------------------------------
4793 * Controls the system LEDs.
4794 */
4795#define MLXSW_REG_MLCR_ID 0x902B
4796#define MLXSW_REG_MLCR_LEN 0x0C
4797
4798static const struct mlxsw_reg_info mlxsw_reg_mlcr = {
4799 .id = MLXSW_REG_MLCR_ID,
4800 .len = MLXSW_REG_MLCR_LEN,
4801};
4802
4803/* reg_mlcr_local_port
4804 * Local port number.
4805 * Access: RW
4806 */
4807MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8);
4808
4809#define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF
4810
4811/* reg_mlcr_beacon_duration
4812 * Duration of the beacon to be active, in seconds.
4813 * 0x0 - Will turn off the beacon.
4814 * 0xFFFF - Will turn on the beacon until explicitly turned off.
4815 * Access: RW
4816 */
4817MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16);
4818
4819/* reg_mlcr_beacon_remain
4820 * Remaining duration of the beacon, in seconds.
4821 * 0xFFFF indicates an infinite amount of time.
4822 * Access: RO
4823 */
4824MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16);
4825
4826static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
4827 bool active)
4828{
4829 MLXSW_REG_ZERO(mlcr, payload);
4830 mlxsw_reg_mlcr_local_port_set(payload, local_port);
4831 mlxsw_reg_mlcr_beacon_duration_set(payload, active ?
4832 MLXSW_REG_MLCR_DURATION_MAX : 0);
4833}
4834
Jiri Pirkoe0594362015-10-16 14:01:31 +02004835/* SBPR - Shared Buffer Pools Register
4836 * -----------------------------------
4837 * The SBPR configures and retrieves the shared buffer pools and configuration.
4838 */
4839#define MLXSW_REG_SBPR_ID 0xB001
4840#define MLXSW_REG_SBPR_LEN 0x14
4841
4842static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
4843 .id = MLXSW_REG_SBPR_ID,
4844 .len = MLXSW_REG_SBPR_LEN,
4845};
4846
Jiri Pirko497e8592016-04-08 19:11:24 +02004847/* shared direction enum for SBPR, SBCM, SBPM */
4848enum mlxsw_reg_sbxx_dir {
4849 MLXSW_REG_SBXX_DIR_INGRESS,
4850 MLXSW_REG_SBXX_DIR_EGRESS,
Jiri Pirkoe0594362015-10-16 14:01:31 +02004851};
4852
4853/* reg_sbpr_dir
4854 * Direction.
4855 * Access: Index
4856 */
4857MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2);
4858
4859/* reg_sbpr_pool
4860 * Pool index.
4861 * Access: Index
4862 */
4863MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
4864
4865/* reg_sbpr_size
4866 * Pool size in buffer cells.
4867 * Access: RW
4868 */
4869MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24);
4870
4871enum mlxsw_reg_sbpr_mode {
4872 MLXSW_REG_SBPR_MODE_STATIC,
4873 MLXSW_REG_SBPR_MODE_DYNAMIC,
4874};
4875
4876/* reg_sbpr_mode
4877 * Pool quota calculation mode.
4878 * Access: RW
4879 */
4880MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
4881
4882static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
Jiri Pirko497e8592016-04-08 19:11:24 +02004883 enum mlxsw_reg_sbxx_dir dir,
Jiri Pirkoe0594362015-10-16 14:01:31 +02004884 enum mlxsw_reg_sbpr_mode mode, u32 size)
4885{
4886 MLXSW_REG_ZERO(sbpr, payload);
4887 mlxsw_reg_sbpr_pool_set(payload, pool);
4888 mlxsw_reg_sbpr_dir_set(payload, dir);
4889 mlxsw_reg_sbpr_mode_set(payload, mode);
4890 mlxsw_reg_sbpr_size_set(payload, size);
4891}
4892
4893/* SBCM - Shared Buffer Class Management Register
4894 * ----------------------------------------------
4895 * The SBCM register configures and retrieves the shared buffer allocation
4896 * and configuration according to Port-PG, including the binding to pool
4897 * and definition of the associated quota.
4898 */
4899#define MLXSW_REG_SBCM_ID 0xB002
4900#define MLXSW_REG_SBCM_LEN 0x28
4901
4902static const struct mlxsw_reg_info mlxsw_reg_sbcm = {
4903 .id = MLXSW_REG_SBCM_ID,
4904 .len = MLXSW_REG_SBCM_LEN,
4905};
4906
4907/* reg_sbcm_local_port
4908 * Local port number.
4909 * For Ingress: excludes CPU port and Router port
4910 * For Egress: excludes IP Router
4911 * Access: Index
4912 */
4913MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
4914
4915/* reg_sbcm_pg_buff
4916 * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
4917 * For PG buffer: range is 0..cap_max_pg_buffers - 1
4918 * For traffic class: range is 0..cap_max_tclass - 1
4919 * Note that when traffic class is in MC aware mode then the traffic
4920 * classes which are MC aware cannot be configured.
4921 * Access: Index
4922 */
4923MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
4924
Jiri Pirkoe0594362015-10-16 14:01:31 +02004925/* reg_sbcm_dir
4926 * Direction.
4927 * Access: Index
4928 */
4929MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
4930
4931/* reg_sbcm_min_buff
4932 * Minimum buffer size for the limiter, in cells.
4933 * Access: RW
4934 */
4935MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
4936
Jiri Pirkoc30a53c2016-04-14 18:19:22 +02004937/* shared max_buff limits for dynamic threshold for SBCM, SBPM */
4938#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
4939#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
4940
Jiri Pirkoe0594362015-10-16 14:01:31 +02004941/* reg_sbcm_max_buff
4942 * When the pool associated to the port-pg/tclass is configured to
4943 * static, Maximum buffer size for the limiter configured in cells.
4944 * When the pool associated to the port-pg/tclass is configured to
4945 * dynamic, the max_buff holds the "alpha" parameter, supporting
4946 * the following values:
4947 * 0: 0
4948 * i: (1/128)*2^(i-1), for i=1..14
4949 * 0xFF: Infinity
4950 * Access: RW
4951 */
4952MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
4953
4954/* reg_sbcm_pool
4955 * Association of the port-priority to a pool.
4956 * Access: RW
4957 */
4958MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
4959
4960static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
Jiri Pirko497e8592016-04-08 19:11:24 +02004961 enum mlxsw_reg_sbxx_dir dir,
Jiri Pirkoe0594362015-10-16 14:01:31 +02004962 u32 min_buff, u32 max_buff, u8 pool)
4963{
4964 MLXSW_REG_ZERO(sbcm, payload);
4965 mlxsw_reg_sbcm_local_port_set(payload, local_port);
4966 mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff);
4967 mlxsw_reg_sbcm_dir_set(payload, dir);
4968 mlxsw_reg_sbcm_min_buff_set(payload, min_buff);
4969 mlxsw_reg_sbcm_max_buff_set(payload, max_buff);
4970 mlxsw_reg_sbcm_pool_set(payload, pool);
4971}
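
/* Usage sketch (illustrative only; local_port, min_buff and max_buff are
 * caller-supplied quota values in cells, and mlxsw_reg_write() is assumed
 * to be the core's register-write helper): binding ingress PG buffer 0 of
 * a port to pool 0 would typically look as follows:
 *
 *	char sbcm_pl[MLXSW_REG_SBCM_LEN];
 *
 *	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, 0,
 *			    MLXSW_REG_SBXX_DIR_INGRESS, min_buff, max_buff,
 *			    0);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbcm), sbcm_pl);
 */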
4972
Jiri Pirko9efc8f62016-04-08 19:11:25 +02004973/* SBPM - Shared Buffer Port Management Register
4974 * ---------------------------------------------
Jiri Pirkoe0594362015-10-16 14:01:31 +02004975 * The SBPM register configures and retrieves the shared buffer allocation
4976 * and configuration according to Port-Pool, including the definition
4977 * of the associated quota.
4978 */
4979#define MLXSW_REG_SBPM_ID 0xB003
4980#define MLXSW_REG_SBPM_LEN 0x28
4981
4982static const struct mlxsw_reg_info mlxsw_reg_sbpm = {
4983 .id = MLXSW_REG_SBPM_ID,
4984 .len = MLXSW_REG_SBPM_LEN,
4985};
4986
4987/* reg_sbpm_local_port
4988 * Local port number.
4989 * For Ingress: excludes CPU port and Router port
4990 * For Egress: excludes IP Router
4991 * Access: Index
4992 */
4993MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
4994
4995/* reg_sbpm_pool
4996 * The pool associated to quota counting on the local_port.
4997 * Access: Index
4998 */
4999MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
5000
Jiri Pirkoe0594362015-10-16 14:01:31 +02005001/* reg_sbpm_dir
5002 * Direction.
5003 * Access: Index
5004 */
5005MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
5006
Jiri Pirko42a7f1d2016-04-14 18:19:27 +02005007/* reg_sbpm_buff_occupancy
5008 * Current buffer occupancy in cells.
5009 * Access: RO
5010 */
5011MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24);
5012
5013/* reg_sbpm_clr
5014 * Clear Max Buffer Occupancy
5015 * When this bit is set, max_buff_occupancy field is cleared (and a
5016 * new max value is tracked from the time the clear was performed).
5017 * Access: OP
5018 */
5019MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1);
5020
5021/* reg_sbpm_max_buff_occupancy
5022 * Maximum value of buffer occupancy in cells monitored. Cleared by
5023 * writing to the clr field.
5024 * Access: RO
5025 */
5026MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24);
5027
5028/* reg_sbpm_min_buff
5029 * Minimum buffer size for the limiter, in cells.
5030 * Access: RW
5031 */
5032MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
5033
5034/* reg_sbpm_max_buff
5035 * When the pool associated with the port-pg/tclass is configured as
5036 * static, this is the maximum buffer size for the limiter, in cells.
5037 * When the pool associated with the port-pg/tclass is configured as
5038 * dynamic, max_buff holds the "alpha" parameter, supporting
5039 * the following values:
5040 * 0: 0
5041 * i: (1/128)*2^(i-1), for i=1..14
5042 * 0xFF: Infinity
5043 * Access: RW
5044 */
5045MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
5046
5047static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
5048 enum mlxsw_reg_sbxx_dir dir, bool clr,
5049 u32 min_buff, u32 max_buff)
5050{
5051 MLXSW_REG_ZERO(sbpm, payload);
5052 mlxsw_reg_sbpm_local_port_set(payload, local_port);
5053 mlxsw_reg_sbpm_pool_set(payload, pool);
5054 mlxsw_reg_sbpm_dir_set(payload, dir);
5055 mlxsw_reg_sbpm_clr_set(payload, clr);
5056 mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
5057 mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
5058}
5059
5060static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
5061 u32 *p_max_buff_occupancy)
5062{
5063 *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload);
5064 *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload);
5065}
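/* A minimal occupancy-readout sketch (an assumption, not code from the
 * driver), relying on the mlxsw core mlxsw_reg_query() helper; setting clr
 * restarts max_buff_occupancy tracking after the read:
 *
 *	char sbpm_pl[MLXSW_REG_SBPM_LEN];
 *	u32 occ, max_occ;
 *
 *	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool,
 *			    MLXSW_REG_SBXX_DIR_INGRESS, true, 0, 0);
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sbpm), sbpm_pl);
 *	if (!err)
 *		mlxsw_reg_sbpm_unpack(sbpm_pl, &occ, &max_occ);
 */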
5066
5067/* SBMM - Shared Buffer Multicast Management Register
5068 * --------------------------------------------------
5069 * The SBMM register configures and retrieves the shared buffer allocation
5070 * and configuration for MC packets according to Switch-Priority, including
5071 * the binding to pool and definition of the associated quota.
5072 */
5073#define MLXSW_REG_SBMM_ID 0xB004
5074#define MLXSW_REG_SBMM_LEN 0x28
5075
5076static const struct mlxsw_reg_info mlxsw_reg_sbmm = {
5077 .id = MLXSW_REG_SBMM_ID,
5078 .len = MLXSW_REG_SBMM_LEN,
5079};
5080
5081/* reg_sbmm_prio
5082 * Switch Priority.
5083 * Access: Index
5084 */
5085MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4);
5086
5087/* reg_sbmm_min_buff
5088 * Minimum buffer size for the limiter, in cells.
5089 * Access: RW
5090 */
5091MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24);
5092
5093/* reg_sbmm_max_buff
5094 * When the pool associated with the port-pg/tclass is configured as
5095 * static, this is the maximum buffer size for the limiter, in cells.
5096 * When the pool associated with the port-pg/tclass is configured as
5097 * dynamic, max_buff holds the "alpha" parameter, supporting
5098 * the following values:
5099 * 0: 0
5100 * i: (1/128)*2^(i-1), for i=1..14
5101 * 0xFF: Infinity
5102 * Access: RW
5103 */
5104MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24);
5105
5106/* reg_sbmm_pool
5107 * Association of the switch priority to a pool.
5108 * Access: RW
5109 */
5110MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
5111
5112static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
5113 u32 max_buff, u8 pool)
5114{
5115 MLXSW_REG_ZERO(sbmm, payload);
5116 mlxsw_reg_sbmm_prio_set(payload, prio);
5117 mlxsw_reg_sbmm_min_buff_set(payload, min_buff);
5118 mlxsw_reg_sbmm_max_buff_set(payload, max_buff);
5119 mlxsw_reg_sbmm_pool_set(payload, pool);
5120}
5121
5122/* SBSR - Shared Buffer Status Register
5123 * ------------------------------------
5124 * The SBSR register retrieves the shared buffer occupancy according to
5125 * Port-Pool. Note that this register enables reading a large amount of data.
5126 * It is the user's responsibility to limit the amount of data to ensure the
5127 * response fits within the maximum transfer unit. If the response exceeds
5128 * the maximum transfer unit, it is truncated with no special notice.
5129 */
5130#define MLXSW_REG_SBSR_ID 0xB005
5131#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */
5132#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */
5133#define MLXSW_REG_SBSR_REC_MAX_COUNT 120
5134#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \
5135 MLXSW_REG_SBSR_REC_LEN * \
5136 MLXSW_REG_SBSR_REC_MAX_COUNT)
5137
5138static const struct mlxsw_reg_info mlxsw_reg_sbsr = {
5139 .id = MLXSW_REG_SBSR_ID,
5140 .len = MLXSW_REG_SBSR_LEN,
5141};
5142
5143/* reg_sbsr_clr
5144 * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
5145 * field is cleared (and a new max value is tracked from the time the clear
5146 * was performed).
5147 * Access: OP
5148 */
5149MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
5150
5151/* reg_sbsr_ingress_port_mask
5152 * Bit vector for all ingress network ports.
5153 * Indicates which of the ports (for which the relevant bit is set)
5154 * are affected by the set operation. Configuration of any other port
5155 * does not change.
5156 * Access: Index
5157 */
5158MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1);
5159
5160/* reg_sbsr_pg_buff_mask
5161 * Bit vector for all switch priority groups.
5162 * Indicates which of the priorities (for which the relevant bit is set)
5163 * are affected by the set operation. Configuration of any other priority
5164 * does not change.
5165 * Range is 0..cap_max_pg_buffers - 1
5166 * Access: Index
5167 */
5168MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1);
5169
5170/* reg_sbsr_egress_port_mask
5171 * Bit vector for all egress network ports.
5172 * Indicates which of the ports (for which the relevant bit is set)
5173 * are affected by the set operation. Configuration of any other port
5174 * does not change.
5175 * Access: Index
5176 */
5177MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1);
5178
5179/* reg_sbsr_tclass_mask
5180 * Bit vector for all traffic classes.
5181 * Indicates which of the traffic classes (for which the relevant bit is
5182 * set) are affected by the set operation. Configuration of any other
5183 * traffic class does not change.
5184 * Range is 0..cap_max_tclass - 1
5185 * Access: Index
5186 */
5187MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1);
5188
5189static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr)
5190{
5191 MLXSW_REG_ZERO(sbsr, payload);
5192 mlxsw_reg_sbsr_clr_set(payload, clr);
5193}
5194
5195/* reg_sbsr_rec_buff_occupancy
5196 * Current buffer occupancy in cells.
5197 * Access: RO
5198 */
5199MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
5200 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false);
5201
5202/* reg_sbsr_rec_max_buff_occupancy
5203 * Maximum value of buffer occupancy in cells monitored. Cleared by
5204 * writing to the clr field.
5205 * Access: RO
5206 */
5207MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
5208 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false);
5209
5210static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
5211 u32 *p_buff_occupancy,
5212 u32 *p_max_buff_occupancy)
5213{
5214 *p_buff_occupancy =
5215 mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index);
5216 *p_max_buff_occupancy =
5217 mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
5218}
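/* A minimal usage sketch (an assumption, not code from the driver), relying
 * on the mlxsw core mlxsw_reg_query() helper. The masks select which
 * ports/PGs/tclasses are reported; the caller must keep the number of
 * selected entries within MLXSW_REG_SBSR_REC_MAX_COUNT so the response is
 * not truncated:
 *
 *	char *sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
 *	u32 occ, max_occ;
 *
 *	mlxsw_reg_sbsr_pack(sbsr_pl, false);
 *	mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
 *	mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, 0, 1);
 *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl);
 *	if (!err)
 *		mlxsw_reg_sbsr_rec_unpack(sbsr_pl, 0, &occ, &max_occ);
 *	kfree(sbsr_pl);
 */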
5219
5220/* SBIB - Shared Buffer Internal Buffer Register
5221 * ---------------------------------------------
5222 * The SBIB register configures per-port buffers for internal use. The
5223 * internal buffers consume memory from the port buffers (note that the
5224 * port buffers are also used by PBMC).
5225 *
5226 * For Spectrum this is used for egress mirroring.
5227 */
5228#define MLXSW_REG_SBIB_ID 0xB006
5229#define MLXSW_REG_SBIB_LEN 0x10
5230
5231static const struct mlxsw_reg_info mlxsw_reg_sbib = {
5232 .id = MLXSW_REG_SBIB_ID,
5233 .len = MLXSW_REG_SBIB_LEN,
5234};
5235
5236/* reg_sbib_local_port
5237 * Local port number
5238 * Not supported for CPU port and router port
5239 * Access: Index
5240 */
5241MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
5242
5243/* reg_sbib_buff_size
5244 * Units represented in cells
5245 * Allowed range is 0 to (cap_max_headroom_size - 1)
5246 * Default is 0
5247 * Access: RW
5248 */
5249MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
5250
5251static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
5252 u32 buff_size)
5253{
5254 MLXSW_REG_ZERO(sbib, payload);
5255 mlxsw_reg_sbib_local_port_set(payload, local_port);
5256 mlxsw_reg_sbib_buff_size_set(payload, buff_size);
5257}
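/* A minimal usage sketch (an assumption, not code from the driver), relying
 * on the mlxsw core mlxsw_reg_write() helper; reserving an internal buffer
 * (e.g. for egress mirroring) on a port. Writing a size of zero restores
 * the documented default:
 *
 *	char sbib_pl[MLXSW_REG_SBIB_LEN];
 *
 *	mlxsw_reg_sbib_pack(sbib_pl, local_port, buff_size);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbib), sbib_pl);
 */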
5258
5259static inline const char *mlxsw_reg_id_str(u16 reg_id)
5260{
5261 switch (reg_id) {
5262 case MLXSW_REG_SGCR_ID:
5263 return "SGCR";
5264 case MLXSW_REG_SPAD_ID:
5265 return "SPAD";
5266 case MLXSW_REG_SMID_ID:
5267 return "SMID";
5268 case MLXSW_REG_SSPR_ID:
5269 return "SSPR";
5270 case MLXSW_REG_SFDAT_ID:
5271 return "SFDAT";
5272 case MLXSW_REG_SFD_ID:
5273 return "SFD";
5274 case MLXSW_REG_SFN_ID:
5275 return "SFN";
5276 case MLXSW_REG_SPMS_ID:
5277 return "SPMS";
5278 case MLXSW_REG_SPVID_ID:
5279 return "SPVID";
5280 case MLXSW_REG_SPVM_ID:
5281 return "SPVM";
5282 case MLXSW_REG_SPAFT_ID:
5283 return "SPAFT";
5284 case MLXSW_REG_SFGC_ID:
5285 return "SFGC";
5286 case MLXSW_REG_SFTR_ID:
5287 return "SFTR";
5288 case MLXSW_REG_SFDF_ID:
5289 return "SFDF";
5290 case MLXSW_REG_SLDR_ID:
5291 return "SLDR";
5292 case MLXSW_REG_SLCR_ID:
5293 return "SLCR";
5294 case MLXSW_REG_SLCOR_ID:
5295 return "SLCOR";
5296 case MLXSW_REG_SPMLR_ID:
5297 return "SPMLR";
5298 case MLXSW_REG_SVFA_ID:
5299 return "SVFA";
5300 case MLXSW_REG_SVPE_ID:
5301 return "SVPE";
5302 case MLXSW_REG_SFMR_ID:
5303 return "SFMR";
5304 case MLXSW_REG_SPVMLR_ID:
5305 return "SPVMLR";
5306 case MLXSW_REG_QTCT_ID:
5307 return "QTCT";
5308 case MLXSW_REG_QEEC_ID:
5309 return "QEEC";
5310 case MLXSW_REG_PMLP_ID:
5311 return "PMLP";
5312 case MLXSW_REG_PMTU_ID:
5313 return "PMTU";
5314 case MLXSW_REG_PTYS_ID:
5315 return "PTYS";
5316 case MLXSW_REG_PPAD_ID:
5317 return "PPAD";
5318 case MLXSW_REG_PAOS_ID:
5319 return "PAOS";
5320 case MLXSW_REG_PFCC_ID:
5321 return "PFCC";
5322 case MLXSW_REG_PPCNT_ID:
5323 return "PPCNT";
5324 case MLXSW_REG_PPTB_ID:
5325 return "PPTB";
5326 case MLXSW_REG_PBMC_ID:
5327 return "PBMC";
5328 case MLXSW_REG_PSPA_ID:
5329 return "PSPA";
5330 case MLXSW_REG_HTGT_ID:
5331 return "HTGT";
5332 case MLXSW_REG_HPKT_ID:
5333 return "HPKT";
5334 case MLXSW_REG_RGCR_ID:
5335 return "RGCR";
5336 case MLXSW_REG_RITR_ID:
5337 return "RITR";
5338 case MLXSW_REG_RATR_ID:
5339 return "RATR";
5340 case MLXSW_REG_RALTA_ID:
5341 return "RALTA";
5342 case MLXSW_REG_RALST_ID:
5343 return "RALST";
5344 case MLXSW_REG_RALTB_ID:
5345 return "RALTB";
5346 case MLXSW_REG_RALUE_ID:
5347 return "RALUE";
5348 case MLXSW_REG_RAUHT_ID:
5349 return "RAUHT";
5350 case MLXSW_REG_RALEU_ID:
5351 return "RALEU";
5352 case MLXSW_REG_RAUHTD_ID:
5353 return "RAUHTD";
5354 case MLXSW_REG_MFCR_ID:
5355 return "MFCR";
5356 case MLXSW_REG_MFSC_ID:
5357 return "MFSC";
5358 case MLXSW_REG_MFSM_ID:
5359 return "MFSM";
5360 case MLXSW_REG_MTCAP_ID:
5361 return "MTCAP";
5362 case MLXSW_REG_MPAT_ID:
5363 return "MPAT";
5364 case MLXSW_REG_MPAR_ID:
5365 return "MPAR";
5366 case MLXSW_REG_MTMP_ID:
5367 return "MTMP";
5368 case MLXSW_REG_MLCR_ID:
5369 return "MLCR";
5370 case MLXSW_REG_SBPR_ID:
5371 return "SBPR";
5372 case MLXSW_REG_SBCM_ID:
5373 return "SBCM";
5374 case MLXSW_REG_SBPM_ID:
5375 return "SBPM";
5376 case MLXSW_REG_SBMM_ID:
5377 return "SBMM";
5378 case MLXSW_REG_SBSR_ID:
5379 return "SBSR";
5380 case MLXSW_REG_SBIB_ID:
5381 return "SBIB";
5382 default:
5383 return "*UNKNOWN*";
5384 }
5385}
5386
5387/* PUDE - Port Up / Down Event
5388 * ---------------------------
5389 * Reports the operational state change of a port.
5390 */
5391#define MLXSW_REG_PUDE_LEN 0x10
5392
5393/* reg_pude_swid
5394 * Switch partition ID with which to associate the port.
5395 * Access: Index
5396 */
5397MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
5398
5399/* reg_pude_local_port
5400 * Local port number.
5401 * Access: Index
5402 */
5403MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
5404
5405/* reg_pude_admin_status
5406 * Port administrative state (the desired state).
5407 * 1 - Up.
5408 * 2 - Down.
5409 * 3 - Up once. This means that in case of link failure, the port won't go
5410 * into polling mode, but will wait to be re-enabled by software.
5411 * 4 - Disabled by system. Can only be set by hardware.
5412 * Access: RO
5413 */
5414MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
5415
5416/* reg_pude_oper_status
5417 * Port operational state.
5418 * 1 - Up.
5419 * 2 - Down.
5420 * 3 - Down by port failure. This means that the device will not let the
5421 * port up again until explicitly specified by software.
5422 * Access: RO
5423 */
5424MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
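/* An illustrative decode sketch (assumption: the helper name is hypothetical
 * and not part of the driver): an event listener receiving a PUDE payload
 * would typically read the port number and map oper_status to a link state
 * using the encoding documented above (1 - Up, 2 - Down).
 */
static inline bool mlxsw_reg_pude_port_is_up_example(char *pude_pl,
						     u8 *p_local_port)
{
	*p_local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	return mlxsw_reg_pude_oper_status_get(pude_pl) == 1;	/* 1 - Up */
}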
5425
5426#endif