1/*
2 * drivers/net/ethernet/mellanox/mlxsw/reg.h
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
5 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
6 * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
7 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * Alternatively, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") version 2 as published by the Free
23 * Software Foundation.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#ifndef _MLXSW_REG_H
39#define _MLXSW_REG_H
40
41#include <linux/string.h>
42#include <linux/bitops.h>
43#include <linux/if_vlan.h>
44
45#include "item.h"
46#include "port.h"
47
48struct mlxsw_reg_info {
49 u16 id;
50 u16 len; /* In u8 */
51};
52
53#define MLXSW_REG(type) (&mlxsw_reg_##type)
54#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
55#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
56
57/* SGCR - Switch General Configuration Register
58 * --------------------------------------------
59 * This register is used for configuration of the switch capabilities.
60 */
61#define MLXSW_REG_SGCR_ID 0x2000
62#define MLXSW_REG_SGCR_LEN 0x10
63
64static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
65 .id = MLXSW_REG_SGCR_ID,
66 .len = MLXSW_REG_SGCR_LEN,
67};
68
69/* reg_sgcr_llb
70 * Link Local Broadcast (Default=0)
71 * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
72 * packets and ignore the IGMP snooping entries.
73 * Access: RW
74 */
75MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
76
77static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
78{
79 MLXSW_REG_ZERO(sgcr, payload);
80 mlxsw_reg_sgcr_llb_set(payload, !!llb);
81}
82
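/* Usage sketch (illustrative, not part of the register definitions): every
 * register in this file follows the same pattern - allocate a payload buffer
 * of MLXSW_REG_<reg>_LEN bytes, fill it with the pack helper and hand it to
 * the core register access helpers (mlxsw_reg_write()/mlxsw_reg_query() in
 * core.h). For SGCR this would look roughly like:
 *
 *	char sgcr_pl[MLXSW_REG_SGCR_LEN];
 *
 *	mlxsw_reg_sgcr_pack(sgcr_pl, true);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), sgcr_pl);
 *
 * where mlxsw_core comes from the caller's driver context.
 */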
83/* SPAD - Switch Physical Address Register
84 * ---------------------------------------
85 * The SPAD register configures the switch physical MAC address.
86 */
87#define MLXSW_REG_SPAD_ID 0x2002
88#define MLXSW_REG_SPAD_LEN 0x10
89
90static const struct mlxsw_reg_info mlxsw_reg_spad = {
91 .id = MLXSW_REG_SPAD_ID,
92 .len = MLXSW_REG_SPAD_LEN,
93};
94
95/* reg_spad_base_mac
96 * Base MAC address for the switch partitions.
97 * Per switch partition MAC address is equal to:
98 * base_mac + swid
99 * Access: RW
100 */
101MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
102
103/* SMID - Switch Multicast ID
104 * --------------------------
105 * The MID record maps from a MID (Multicast ID), which is a unique identifier
106 * of the multicast group within the stacking domain, into a list of local
107 * ports into which the packet is replicated.
108 */
109#define MLXSW_REG_SMID_ID 0x2007
110#define MLXSW_REG_SMID_LEN 0x240
111
112static const struct mlxsw_reg_info mlxsw_reg_smid = {
113 .id = MLXSW_REG_SMID_ID,
114 .len = MLXSW_REG_SMID_LEN,
115};
116
117/* reg_smid_swid
118 * Switch partition ID.
119 * Access: Index
120 */
121MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
122
123/* reg_smid_mid
124 * Multicast identifier - global identifier that represents the multicast group
125 * across all devices.
126 * Access: Index
127 */
128MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
129
130/* reg_smid_port
131 * Local port membership (1 bit per port).
132 * Access: RW
133 */
134MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
135
136/* reg_smid_port_mask
137 * Local port mask (1 bit per port).
138 * Access: W
139 */
140MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
141
142static inline void mlxsw_reg_smid_pack(char *payload, u16 mid,
143 u8 port, bool set)
144{
145 MLXSW_REG_ZERO(smid, payload);
146 mlxsw_reg_smid_swid_set(payload, 0);
147 mlxsw_reg_smid_mid_set(payload, mid);
148 mlxsw_reg_smid_port_set(payload, port, set);
149 mlxsw_reg_smid_port_mask_set(payload, port, 1);
150}
151
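/* Illustrative sketch only: add local port 3 to multicast group (MID) 0x1000.
 * The port and MID values are made-up examples; a real caller derives them
 * from the bridge MDB code and then writes the payload through the core.
 */
static inline void mlxsw_reg_smid_example_pack(char *smid_pl)
{
	mlxsw_reg_smid_pack(smid_pl, 0x1000, 3, true);
}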
152/* SSPR - Switch System Port Record Register
153 * -----------------------------------------
154 * Configures the system port to local port mapping.
155 */
156#define MLXSW_REG_SSPR_ID 0x2008
157#define MLXSW_REG_SSPR_LEN 0x8
158
159static const struct mlxsw_reg_info mlxsw_reg_sspr = {
160 .id = MLXSW_REG_SSPR_ID,
161 .len = MLXSW_REG_SSPR_LEN,
162};
163
164/* reg_sspr_m
165 * Master - if set, then the record describes the master system port.
166 * This is needed in case a local port is mapped into several system ports
167 * (for multipathing). That number will be reported as the source system
168 * port when packets are forwarded to the CPU. Only one master port is allowed
169 * per local port.
170 *
171 * Note: Must be set for Spectrum.
172 * Access: RW
173 */
174MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
175
176/* reg_sspr_local_port
177 * Local port number.
178 *
179 * Access: RW
180 */
181MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
182
183/* reg_sspr_sub_port
184 * Virtual port within the physical port.
185 * Should be set to 0 when virtual ports are not enabled on the port.
186 *
187 * Access: RW
188 */
189MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
190
191/* reg_sspr_system_port
192 * Unique identifier within the stacking domain that represents all the ports
193 * that are available in the system (external ports).
194 *
195 * Currently, only single-ASIC configurations are supported, so we default to
196 * 1:1 mapping between system ports and local ports.
197 * Access: Index
198 */
199MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
200
201static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
202{
203 MLXSW_REG_ZERO(sspr, payload);
204 mlxsw_reg_sspr_m_set(payload, 1);
205 mlxsw_reg_sspr_local_port_set(payload, local_port);
206 mlxsw_reg_sspr_sub_port_set(payload, 0);
207 mlxsw_reg_sspr_system_port_set(payload, local_port);
208}
209
210/* SFDAT - Switch Filtering Database Aging Time
211 * --------------------------------------------
212 * Controls the switch aging time. The aging time is configurable per switch
213 * partition.
214 */
215#define MLXSW_REG_SFDAT_ID 0x2009
216#define MLXSW_REG_SFDAT_LEN 0x8
217
218static const struct mlxsw_reg_info mlxsw_reg_sfdat = {
219 .id = MLXSW_REG_SFDAT_ID,
220 .len = MLXSW_REG_SFDAT_LEN,
221};
222
223/* reg_sfdat_swid
224 * Switch partition ID.
225 * Access: Index
226 */
227MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8);
228
229/* reg_sfdat_age_time
230 * Aging time in seconds
231 * Min - 10 seconds
232 * Max - 1,000,000 seconds
233 * Default is 300 seconds.
234 * Access: RW
235 */
236MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20);
237
238static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time)
239{
240 MLXSW_REG_ZERO(sfdat, payload);
241 mlxsw_reg_sfdat_swid_set(payload, 0);
242 mlxsw_reg_sfdat_age_time_set(payload, age_time);
243}
244
245/* SFD - Switch Filtering Database
246 * -------------------------------
247 * The following register defines the access to the filtering database.
248 * The register supports querying, adding, removing and modifying the database.
249 * The access is optimized for bulk updates in which case more than one
250 * FDB record is present in the same command.
251 */
252#define MLXSW_REG_SFD_ID 0x200A
253#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */
254#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */
255#define MLXSW_REG_SFD_REC_MAX_COUNT 64
256#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \
257 MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT)
258
259static const struct mlxsw_reg_info mlxsw_reg_sfd = {
260 .id = MLXSW_REG_SFD_ID,
261 .len = MLXSW_REG_SFD_LEN,
262};
263
264/* reg_sfd_swid
265 * Switch partition ID for queries. Reserved on Write.
266 * Access: Index
267 */
268MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8);
269
270enum mlxsw_reg_sfd_op {
271 /* Dump entire FDB (processed according to record_locator) */
272 MLXSW_REG_SFD_OP_QUERY_DUMP = 0,
273 /* Query records by {MAC, VID/FID} value */
274 MLXSW_REG_SFD_OP_QUERY_QUERY = 1,
275 /* Query and clear activity. Query records by {MAC, VID/FID} value */
276 MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2,
277 /* Test. Response indicates if each of the records could be
278 * added to the FDB.
279 */
280 MLXSW_REG_SFD_OP_WRITE_TEST = 0,
281 /* Add/modify. Aged-out records cannot be added. This command removes
282 * the learning notification of the {MAC, VID/FID}. Response includes
283 * the entries that were added to the FDB.
284 */
285 MLXSW_REG_SFD_OP_WRITE_EDIT = 1,
286 /* Remove record by {MAC, VID/FID}. This command also removes
287 * the learning notification and aged-out notifications
288 * of the {MAC, VID/FID}. The response provides current (pre-removal)
289 * entries as non-aged-out.
290 */
291 MLXSW_REG_SFD_OP_WRITE_REMOVE = 2,
292 /* Remove learned notification by {MAC, VID/FID}. The response provides
293 * the removed learning notification.
294 */
295 MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2,
296};
297
298/* reg_sfd_op
299 * Operation.
300 * Access: OP
301 */
302MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2);
303
304/* reg_sfd_record_locator
305 * Used for querying the FDB. Use record_locator=0 to initiate the
306 * query. When a record is returned, a new record_locator is
307 * returned to be used in the subsequent query.
308 * Reserved for database update.
309 * Access: Index
310 */
311MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30);
312
313/* reg_sfd_num_rec
314 * Request: Number of records to read/add/modify/remove
315 * Response: Number of records read/added/replaced/removed
316 * See above description for more details.
317 * Ranges 0..64
318 * Access: RW
319 */
320MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8);
321
322static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op,
323 u32 record_locator)
324{
325 MLXSW_REG_ZERO(sfd, payload);
326 mlxsw_reg_sfd_op_set(payload, op);
327 mlxsw_reg_sfd_record_locator_set(payload, record_locator);
328}
329
330/* reg_sfd_rec_swid
331 * Switch partition ID.
332 * Access: Index
333 */
334MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8,
335 MLXSW_REG_SFD_REC_LEN, 0x00, false);
336
337enum mlxsw_reg_sfd_rec_type {
338 MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
339 MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1,
340 MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2,
341};
342
343/* reg_sfd_rec_type
344 * FDB record type.
345 * Access: RW
346 */
347MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4,
348 MLXSW_REG_SFD_REC_LEN, 0x00, false);
349
350enum mlxsw_reg_sfd_rec_policy {
351 /* Replacement disabled, aging disabled. */
352 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0,
353 /* (mlag remote): Replacement enabled, aging disabled,
354 * learning notification enabled on this port.
355 */
356 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1,
357 /* (ingress device): Replacement enabled, aging enabled. */
358 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3,
359};
360
361/* reg_sfd_rec_policy
362 * Policy.
363 * Access: RW
364 */
365MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2,
366 MLXSW_REG_SFD_REC_LEN, 0x00, false);
367
368/* reg_sfd_rec_a
369 * Activity. Set for new static entries. Set for static entries if a frame SMAC
370 * lookup hits on the entry.
371 * To clear the a bit, use "query and clear activity" op.
372 * Access: RO
373 */
374MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1,
375 MLXSW_REG_SFD_REC_LEN, 0x00, false);
376
377/* reg_sfd_rec_mac
378 * MAC address.
379 * Access: Index
380 */
381MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6,
382 MLXSW_REG_SFD_REC_LEN, 0x02);
383
384enum mlxsw_reg_sfd_rec_action {
385 /* forward */
386 MLXSW_REG_SFD_REC_ACTION_NOP = 0,
387 /* forward and trap, trap_id is FDB_TRAP */
388 MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
389 /* trap and do not forward, trap_id is FDB_TRAP */
390 MLXSW_REG_SFD_REC_ACTION_TRAP = 2,
391 /* forward to IP router */
392 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3,
393 MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
394};
395
396/* reg_sfd_rec_action
397 * Action to apply on the packet.
398 * Note: Dynamic entries can only be configured with NOP action.
399 * Access: RW
400 */
401MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
402 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
403
404/* reg_sfd_uc_sub_port
405 * VEPA channel on local port.
406 * Valid only if local port is a non-stacking port. Must be 0 if multichannel
407 * VEPA is not enabled.
408 * Access: RW
409 */
410MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
411 MLXSW_REG_SFD_REC_LEN, 0x08, false);
412
413/* reg_sfd_uc_fid_vid
414 * Filtering ID or VLAN ID
415 * For SwitchX and SwitchX-2:
416 * - Dynamic entries (policy 2,3) use FID
417 * - Static entries (policy 0) use VID
418 * - When independent learning is configured, VID=FID
419 * For Spectrum: use FID for both Dynamic and Static entries.
420 * VID should not be used.
421 * Access: Index
422 */
423MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
424 MLXSW_REG_SFD_REC_LEN, 0x08, false);
425
426/* reg_sfd_uc_system_port
427 * Unique port identifier for the final destination of the packet.
428 * Access: RW
429 */
430MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16,
431 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
432
433static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
434 enum mlxsw_reg_sfd_rec_type rec_type,
435 const char *mac,
436 enum mlxsw_reg_sfd_rec_action action)
437{
438 u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload);
439
440 if (rec_index >= num_rec)
441 mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1);
442 mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0);
443 mlxsw_reg_sfd_rec_type_set(payload, rec_index, rec_type);
444 mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac);
445 mlxsw_reg_sfd_rec_action_set(payload, rec_index, action);
446}
447
448static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
449 enum mlxsw_reg_sfd_rec_policy policy,
450 const char *mac, u16 fid_vid,
451 enum mlxsw_reg_sfd_rec_action action,
452 u8 local_port)
453{
454 mlxsw_reg_sfd_rec_pack(payload, rec_index,
455 MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action);
456 mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
457 mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
458 mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
459 mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
460}
461
462static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
463 char *mac, u16 *p_fid_vid,
464 u8 *p_local_port)
465{
466 mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
467 *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
468 *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
469}
470
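/* Illustrative sketch only: program two static unicast FDB entries in a
 * single SFD write. The MACs, FID and local ports are made-up example values.
 * Note that mlxsw_reg_sfd_rec_pack() bumps num_rec for each new record index.
 */
static inline void mlxsw_reg_sfd_example_two_uc_recs(char *sfd_pl)
{
	const char mac0[6] = {0x00, 0x02, 0xc9, 0x00, 0x00, 0x01};
	const char mac1[6] = {0x00, 0x02, 0xc9, 0x00, 0x00, 0x02};

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
			      mac0, 1, MLXSW_REG_SFD_REC_ACTION_NOP, 8);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 1, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
			      mac1, 1, MLXSW_REG_SFD_REC_ACTION_NOP, 9);
}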
471/* reg_sfd_uc_lag_sub_port
472 * LAG sub port.
473 * Must be 0 if multichannel VEPA is not enabled.
474 * Access: RW
475 */
476MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
477 MLXSW_REG_SFD_REC_LEN, 0x08, false);
478
479/* reg_sfd_uc_lag_fid_vid
480 * Filtering ID or VLAN ID
481 * For SwitchX and SwitchX-2:
482 * - Dynamic entries (policy 2,3) use FID
483 * - Static entries (policy 0) use VID
484 * - When independent learning is configured, VID=FID
485 * For Spectrum: use FID for both Dynamic and Static entries.
486 * VID should not be used.
487 * Access: Index
488 */
489MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
490 MLXSW_REG_SFD_REC_LEN, 0x08, false);
491
492/* reg_sfd_uc_lag_lag_vid
493 * Indicates VID in case of vFIDs. Reserved for FIDs.
494 * Access: RW
495 */
496MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
497 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
498
499/* reg_sfd_uc_lag_lag_id
500 * LAG Identifier - pointer into the LAG descriptor table.
501 * Access: RW
502 */
503MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_id, MLXSW_REG_SFD_BASE_LEN, 0, 10,
504 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
505
506static inline void
507mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
508 enum mlxsw_reg_sfd_rec_policy policy,
509 const char *mac, u16 fid_vid,
510 enum mlxsw_reg_sfd_rec_action action, u16 lag_vid,
511 u16 lag_id)
512{
513 mlxsw_reg_sfd_rec_pack(payload, rec_index,
514 MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG,
515 mac, action);
516 mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
517 mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
518 mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
519 mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
520 mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
521}
522
523static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index,
524 char *mac, u16 *p_vid,
525 u16 *p_lag_id)
526{
527 mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
528 *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index);
529 *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index);
530}
531
532/* reg_sfd_mc_pgi
533 *
534 * Multicast port group index - index into the port group table.
535 * Value 0x1FFF indicates the pgi should point to the MID entry.
536 * For Spectrum this value must be set to 0x1FFF
537 * Access: RW
538 */
539MLXSW_ITEM32_INDEXED(reg, sfd, mc_pgi, MLXSW_REG_SFD_BASE_LEN, 16, 13,
540 MLXSW_REG_SFD_REC_LEN, 0x08, false);
541
542/* reg_sfd_mc_fid_vid
543 *
544 * Filtering ID or VLAN ID
545 * Access: Index
546 */
547MLXSW_ITEM32_INDEXED(reg, sfd, mc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
548 MLXSW_REG_SFD_REC_LEN, 0x08, false);
549
550/* reg_sfd_mc_mid
551 *
552 * Multicast identifier - global identifier that represents the multicast
553 * group across all devices.
554 * Access: RW
555 */
556MLXSW_ITEM32_INDEXED(reg, sfd, mc_mid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
557 MLXSW_REG_SFD_REC_LEN, 0x0C, false);
558
559static inline void
560mlxsw_reg_sfd_mc_pack(char *payload, int rec_index,
561 const char *mac, u16 fid_vid,
562 enum mlxsw_reg_sfd_rec_action action, u16 mid)
563{
564 mlxsw_reg_sfd_rec_pack(payload, rec_index,
565 MLXSW_REG_SFD_REC_TYPE_MULTICAST, mac, action);
566 mlxsw_reg_sfd_mc_pgi_set(payload, rec_index, 0x1FFF);
567 mlxsw_reg_sfd_mc_fid_vid_set(payload, rec_index, fid_vid);
568 mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid);
569}
570
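/* Illustrative sketch only: a multicast FDB record pointing a {MAC, FID} pair
 * at a MID. The pgi field is forced to 0x1FFF by mlxsw_reg_sfd_mc_pack() as
 * required for Spectrum; mac/fid/mid are caller-supplied example inputs.
 */
static inline void mlxsw_reg_sfd_example_mc_rec(char *sfd_pl, const char *mac,
						u16 fid, u16 mid)
{
	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mac, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
}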
571/* SFN - Switch FDB Notification Register
572 * -------------------------------------------
573 * The switch provides notifications on newly learned FDB entries and
574 * aged out entries. The notifications can be polled by software.
575 */
576#define MLXSW_REG_SFN_ID 0x200B
577#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */
578#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */
579#define MLXSW_REG_SFN_REC_MAX_COUNT 64
580#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \
581 MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT)
582
583static const struct mlxsw_reg_info mlxsw_reg_sfn = {
584 .id = MLXSW_REG_SFN_ID,
585 .len = MLXSW_REG_SFN_LEN,
586};
587
588/* reg_sfn_swid
589 * Switch partition ID.
590 * Access: Index
591 */
592MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
593
594/* reg_sfn_end
595 * Forces the current session to end.
596 * Access: OP
597 */
598MLXSW_ITEM32(reg, sfn, end, 0x04, 20, 1);
599
600/* reg_sfn_num_rec
601 * Request: Number of learned notifications and aged-out notification
602 * records requested.
603 * Response: Number of notification records returned (must be smaller
604 * than or equal to the value requested)
605 * Ranges 0..64
606 * Access: OP
607 */
608MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8);
609
610static inline void mlxsw_reg_sfn_pack(char *payload)
611{
612 MLXSW_REG_ZERO(sfn, payload);
613 mlxsw_reg_sfn_swid_set(payload, 0);
614 mlxsw_reg_sfn_end_set(payload, 1);
615 mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
616}
617
618/* reg_sfn_rec_swid
619 * Switch partition ID.
620 * Access: RO
621 */
622MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8,
623 MLXSW_REG_SFN_REC_LEN, 0x00, false);
624
625enum mlxsw_reg_sfn_rec_type {
626 /* MAC addresses learned on a regular port. */
627 MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5,
628 /* MAC addresses learned on a LAG port. */
629 MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG = 0x6,
630 /* Aged-out MAC address on a regular port. */
631 MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
632 /* Aged-out MAC address on a LAG port. */
633 MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8,
634};
635
636/* reg_sfn_rec_type
637 * Notification record type.
638 * Access: RO
639 */
640MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
641 MLXSW_REG_SFN_REC_LEN, 0x00, false);
642
643/* reg_sfn_rec_mac
644 * MAC address.
645 * Access: RO
646 */
647MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
648 MLXSW_REG_SFN_REC_LEN, 0x02);
649
650/* reg_sfn_mac_sub_port
651 * VEPA channel on the local port.
652 * 0 if multichannel VEPA is not enabled.
653 * Access: RO
654 */
655MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
656 MLXSW_REG_SFN_REC_LEN, 0x08, false);
657
658/* reg_sfn_mac_fid
659 * Filtering identifier.
660 * Access: RO
661 */
662MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
663 MLXSW_REG_SFN_REC_LEN, 0x08, false);
664
665/* reg_sfn_mac_system_port
666 * Unique port identifier for the final destination of the packet.
667 * Access: RO
668 */
669MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
670 MLXSW_REG_SFN_REC_LEN, 0x0C, false);
671
672static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
673 char *mac, u16 *p_vid,
674 u8 *p_local_port)
675{
676 mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
677 *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
678 *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index);
679}
680
681/* reg_sfn_mac_lag_lag_id
682 * LAG ID (pointer into the LAG descriptor table).
683 * Access: RO
684 */
685MLXSW_ITEM32_INDEXED(reg, sfn, mac_lag_lag_id, MLXSW_REG_SFN_BASE_LEN, 0, 10,
686 MLXSW_REG_SFN_REC_LEN, 0x0C, false);
687
688static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index,
689 char *mac, u16 *p_vid,
690 u16 *p_lag_id)
691{
692 mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
693 *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
694 *p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index);
695}
696
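/* Illustrative sketch only: after a successful SFN query the caller walks the
 * returned records and dispatches on the record type. This variant merely
 * counts the learned-MAC notifications.
 */
static inline int mlxsw_reg_sfn_example_count_learned(char *sfn_pl)
{
	char mac[6];
	u16 fid;
	u8 local_port;
	int i, num_rec, learned = 0;

	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++) {
		if (mlxsw_reg_sfn_rec_type_get(sfn_pl, i) !=
		    MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC)
			continue;
		mlxsw_reg_sfn_mac_unpack(sfn_pl, i, mac, &fid, &local_port);
		learned++;
	}
	return learned;
}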
697/* SPMS - Switch Port MSTP/RSTP State Register
698 * -------------------------------------------
699 * Configures the spanning tree state of a physical port.
700 */
701#define MLXSW_REG_SPMS_ID 0x200D
702#define MLXSW_REG_SPMS_LEN 0x404
703
704static const struct mlxsw_reg_info mlxsw_reg_spms = {
705 .id = MLXSW_REG_SPMS_ID,
706 .len = MLXSW_REG_SPMS_LEN,
707};
708
709/* reg_spms_local_port
710 * Local port number.
711 * Access: Index
712 */
713MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
714
715enum mlxsw_reg_spms_state {
716 MLXSW_REG_SPMS_STATE_NO_CHANGE,
717 MLXSW_REG_SPMS_STATE_DISCARDING,
718 MLXSW_REG_SPMS_STATE_LEARNING,
719 MLXSW_REG_SPMS_STATE_FORWARDING,
720};
721
722/* reg_spms_state
723 * Spanning tree state of each VLAN ID (VID) of the local port.
724 * 0 - Do not change spanning tree state (used only when writing).
725 * 1 - Discarding. No learning or forwarding to/from this port (default).
726 * 2 - Learning. Port is learning, but not forwarding.
727 * 3 - Forwarding. Port is learning and forwarding.
728 * Access: RW
729 */
730MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
731
732static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
733{
734 MLXSW_REG_ZERO(spms, payload);
735 mlxsw_reg_spms_local_port_set(payload, local_port);
736}
737
738static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
739 enum mlxsw_reg_spms_state state)
740{
741 mlxsw_reg_spms_state_set(payload, vid, state);
742}
743
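/* Illustrative sketch only: a single SPMS write can carry the state of many
 * VIDs; here VIDs 1-10 of a port are moved to the forwarding state. The VID
 * range is an example value.
 */
static inline void mlxsw_reg_spms_example_pack(char *spms_pl, u8 local_port)
{
	u16 vid;

	mlxsw_reg_spms_pack(spms_pl, local_port);
	for (vid = 1; vid <= 10; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid,
					MLXSW_REG_SPMS_STATE_FORWARDING);
}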
744/* SPVID - Switch Port VID
745 * -----------------------
746 * The switch port VID configures the default VID for a port.
747 */
748#define MLXSW_REG_SPVID_ID 0x200E
749#define MLXSW_REG_SPVID_LEN 0x08
750
751static const struct mlxsw_reg_info mlxsw_reg_spvid = {
752 .id = MLXSW_REG_SPVID_ID,
753 .len = MLXSW_REG_SPVID_LEN,
754};
755
756/* reg_spvid_local_port
757 * Local port number.
758 * Access: Index
759 */
760MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
761
762/* reg_spvid_sub_port
763 * Virtual port within the physical port.
764 * Should be set to 0 when virtual ports are not enabled on the port.
765 * Access: Index
766 */
767MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
768
769/* reg_spvid_pvid
770 * Port default VID
771 * Access: RW
772 */
773MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
774
775static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
776{
777 MLXSW_REG_ZERO(spvid, payload);
778 mlxsw_reg_spvid_local_port_set(payload, local_port);
779 mlxsw_reg_spvid_pvid_set(payload, pvid);
780}
781
782/* SPVM - Switch Port VLAN Membership
783 * ----------------------------------
784 * The Switch Port VLAN Membership register configures the VLAN membership
785 * of a port in a VLAN denoted by VID. VLAN membership is managed per
786 * virtual port. The register can be used to add and remove VID(s) from a port.
787 */
788#define MLXSW_REG_SPVM_ID 0x200F
789#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
790#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
791#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
792#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
793 MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
794
795static const struct mlxsw_reg_info mlxsw_reg_spvm = {
796 .id = MLXSW_REG_SPVM_ID,
797 .len = MLXSW_REG_SPVM_LEN,
798};
799
800/* reg_spvm_pt
801 * Priority tagged. If this bit is set, packets forwarded to the port with
802 * untagged VLAN membership (u bit is set) will be tagged with priority tag
803 * (VID=0)
804 * Access: RW
805 */
806MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1);
807
808/* reg_spvm_pte
809 * Priority Tagged Update Enable. On Write operations, if this bit is cleared,
810 * the pt bit will NOT be updated. To update the pt bit, pte must be set.
811 * Access: WO
812 */
813MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
814
815/* reg_spvm_local_port
816 * Local port number.
817 * Access: Index
818 */
819MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
820
821/* reg_spvm_sub_port
822 * Virtual port within the physical port.
823 * Should be set to 0 when virtual ports are not enabled on the port.
824 * Access: Index
825 */
826MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8);
827
828/* reg_spvm_num_rec
829 * Number of records to update. Each record contains: i, e, u, vid.
830 * Access: OP
831 */
832MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8);
833
834/* reg_spvm_rec_i
835 * Ingress membership in VLAN ID.
836 * Access: Index
837 */
838MLXSW_ITEM32_INDEXED(reg, spvm, rec_i,
839 MLXSW_REG_SPVM_BASE_LEN, 14, 1,
840 MLXSW_REG_SPVM_REC_LEN, 0, false);
841
842/* reg_spvm_rec_e
843 * Egress membership in VLAN ID.
844 * Access: Index
845 */
846MLXSW_ITEM32_INDEXED(reg, spvm, rec_e,
847 MLXSW_REG_SPVM_BASE_LEN, 13, 1,
848 MLXSW_REG_SPVM_REC_LEN, 0, false);
849
850/* reg_spvm_rec_u
851 * Untagged - port is an untagged member - egress transmission uses untagged
852 * frames on VID<n>
853 * Access: Index
854 */
855MLXSW_ITEM32_INDEXED(reg, spvm, rec_u,
856 MLXSW_REG_SPVM_BASE_LEN, 12, 1,
857 MLXSW_REG_SPVM_REC_LEN, 0, false);
858
859/* reg_spvm_rec_vid
860 * VLAN ID to which the record applies.
861 * Access: Index
862 */
863MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
864 MLXSW_REG_SPVM_BASE_LEN, 0, 12,
865 MLXSW_REG_SPVM_REC_LEN, 0, false);
866
867static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
868 u16 vid_begin, u16 vid_end,
869 bool is_member, bool untagged)
870{
871 int size = vid_end - vid_begin + 1;
872 int i;
873
874 MLXSW_REG_ZERO(spvm, payload);
875 mlxsw_reg_spvm_local_port_set(payload, local_port);
876 mlxsw_reg_spvm_num_rec_set(payload, size);
877
878 for (i = 0; i < size; i++) {
879 mlxsw_reg_spvm_rec_i_set(payload, i, is_member);
880 mlxsw_reg_spvm_rec_e_set(payload, i, is_member);
881 mlxsw_reg_spvm_rec_u_set(payload, i, untagged);
882 mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i);
883 }
884}
885
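/* Illustrative sketch only: make a port an untagged egress member of VIDs
 * 10-20 in one SPVM write. The VID range is an example value.
 */
static inline void mlxsw_reg_spvm_example_pack(char *spvm_pl, u8 local_port)
{
	mlxsw_reg_spvm_pack(spvm_pl, local_port, 10, 20, true, true);
}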
886/* SPAFT - Switch Port Acceptable Frame Types
887 * ------------------------------------------
888 * The Switch Port Acceptable Frame Types register configures the frame
889 * admittance of the port.
890 */
891#define MLXSW_REG_SPAFT_ID 0x2010
892#define MLXSW_REG_SPAFT_LEN 0x08
893
894static const struct mlxsw_reg_info mlxsw_reg_spaft = {
895 .id = MLXSW_REG_SPAFT_ID,
896 .len = MLXSW_REG_SPAFT_LEN,
897};
898
899/* reg_spaft_local_port
900 * Local port number.
901 * Access: Index
902 *
903 * Note: CPU port is not supported (all tag types are allowed).
904 */
905MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
906
907/* reg_spaft_sub_port
908 * Virtual port within the physical port.
909 * Should be set to 0 when virtual ports are not enabled on the port.
910 * Access: RW
911 */
912MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
913
914/* reg_spaft_allow_untagged
915 * When set, untagged frames on the ingress are allowed (default).
916 * Access: RW
917 */
918MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
919
920/* reg_spaft_allow_prio_tagged
921 * When set, priority tagged frames on the ingress are allowed (default).
922 * Access: RW
923 */
924MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
925
926/* reg_spaft_allow_tagged
927 * When set, tagged frames on the ingress are allowed (default).
928 * Access: RW
929 */
930MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
931
932static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
933 bool allow_untagged)
934{
935 MLXSW_REG_ZERO(spaft, payload);
936 mlxsw_reg_spaft_local_port_set(payload, local_port);
937 mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
938 mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
939 mlxsw_reg_spaft_allow_tagged_set(payload, true);
940}
941
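/* Illustrative sketch only: once the PVID is removed from a port, untagged
 * admission is typically switched off while priority-tagged and tagged
 * frames remain allowed.
 */
static inline void mlxsw_reg_spaft_example_tagged_only(char *spaft_pl,
						       u8 local_port)
{
	mlxsw_reg_spaft_pack(spaft_pl, local_port, false);
}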
942/* SFGC - Switch Flooding Group Configuration
943 * ------------------------------------------
944 * The following register controls the association of flooding tables and MIDs
945 * to packet types used for flooding.
946 */
947#define MLXSW_REG_SFGC_ID 0x2011
948#define MLXSW_REG_SFGC_LEN 0x10
949
950static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
951 .id = MLXSW_REG_SFGC_ID,
952 .len = MLXSW_REG_SFGC_LEN,
953};
954
955enum mlxsw_reg_sfgc_type {
956 MLXSW_REG_SFGC_TYPE_BROADCAST,
957 MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
958 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
959 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
960 MLXSW_REG_SFGC_TYPE_RESERVED,
961 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
962 MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL,
963 MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST,
964 MLXSW_REG_SFGC_TYPE_MAX,
965};
966
967/* reg_sfgc_type
968 * The traffic type to reach the flooding table.
969 * Access: Index
970 */
971MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
972
973enum mlxsw_reg_sfgc_bridge_type {
974 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
975 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
976};
977
978/* reg_sfgc_bridge_type
979 * Access: Index
980 *
981 * Note: SwitchX-2 only supports 802.1Q mode.
982 */
983MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
984
985enum mlxsw_flood_table_type {
986 MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
987 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
988 MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
989 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
990 MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
991};
992
993/* reg_sfgc_table_type
994 * See mlxsw_flood_table_type
995 * Access: RW
996 *
997 * Note: FID offset and FID types are not supported in SwitchX-2.
998 */
999MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
1000
1001/* reg_sfgc_flood_table
1002 * Flooding table index to associate with the specific type on the specific
1003 * switch partition.
1004 * Access: RW
1005 */
1006MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
1007
1008/* reg_sfgc_mid
1009 * The multicast ID for the swid. Not supported for Spectrum
1010 * Access: RW
1011 */
1012MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
1013
1014/* reg_sfgc_counter_set_type
1015 * Counter Set Type for flow counters.
1016 * Access: RW
1017 */
1018MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
1019
1020/* reg_sfgc_counter_index
1021 * Counter Index for flow counters.
1022 * Access: RW
1023 */
1024MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
1025
1026static inline void
1027mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
1028 enum mlxsw_reg_sfgc_bridge_type bridge_type,
1029 enum mlxsw_flood_table_type table_type,
1030 unsigned int flood_table)
1031{
1032 MLXSW_REG_ZERO(sfgc, payload);
1033 mlxsw_reg_sfgc_type_set(payload, type);
1034 mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
1035 mlxsw_reg_sfgc_table_type_set(payload, table_type);
1036 mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
1037 mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
1038}
1039
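/* Illustrative sketch only: direct unknown-unicast flooding of 802.1Q bridges
 * to FID-offset flooding table 0. The traffic type and table number are
 * example choices, not the driver's actual flooding scheme.
 */
static inline void mlxsw_reg_sfgc_example_pack(char *sfgc_pl)
{
	mlxsw_reg_sfgc_pack(sfgc_pl, MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0);
}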
1040/* SFTR - Switch Flooding Table Register
1041 * -------------------------------------
1042 * The switch flooding table is used for flooding packet replication. The table
1043 * defines a bit mask of ports for packet replication.
1044 */
1045#define MLXSW_REG_SFTR_ID 0x2012
1046#define MLXSW_REG_SFTR_LEN 0x420
1047
1048static const struct mlxsw_reg_info mlxsw_reg_sftr = {
1049 .id = MLXSW_REG_SFTR_ID,
1050 .len = MLXSW_REG_SFTR_LEN,
1051};
1052
1053/* reg_sftr_swid
1054 * Switch partition ID with which to associate the port.
1055 * Access: Index
1056 */
1057MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
1058
1059/* reg_sftr_flood_table
1060 * Flooding table index to associate with the specific type on the specific
1061 * switch partition.
1062 * Access: Index
1063 */
1064MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
1065
1066/* reg_sftr_index
1067 * Index. Used as an index into the Flooding Table in case the table is
1068 * configured to use VID / FID or FID Offset.
1069 * Access: Index
1070 */
1071MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
1072
1073/* reg_sftr_table_type
1074 * See mlxsw_flood_table_type
1075 * Access: RW
1076 */
1077MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
1078
1079/* reg_sftr_range
1080 * Range of entries to update
1081 * Access: Index
1082 */
1083MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
1084
1085/* reg_sftr_port
1086 * Local port membership (1 bit per port).
1087 * Access: RW
1088 */
1089MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
1090
1091/* reg_sftr_cpu_port_mask
1092 * CPU port mask (1 bit per port).
1093 * Access: W
1094 */
1095MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
1096
1097static inline void mlxsw_reg_sftr_pack(char *payload,
1098 unsigned int flood_table,
1099 unsigned int index,
1100 enum mlxsw_flood_table_type table_type,
1101 unsigned int range, u8 port, bool set)
1102{
1103 MLXSW_REG_ZERO(sftr, payload);
1104 mlxsw_reg_sftr_swid_set(payload, 0);
1105 mlxsw_reg_sftr_flood_table_set(payload, flood_table);
1106 mlxsw_reg_sftr_index_set(payload, index);
1107 mlxsw_reg_sftr_table_type_set(payload, table_type);
1108 mlxsw_reg_sftr_range_set(payload, range);
1109 mlxsw_reg_sftr_port_set(payload, port, set);
1110 mlxsw_reg_sftr_port_mask_set(payload, port, 1);
1111}
1112
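/* Illustrative sketch only: add local port 5 to entry 0 of flooding table 0,
 * matching the FID-offset table type used in the SFGC example above. All
 * numbers are example values.
 */
static inline void mlxsw_reg_sftr_example_pack(char *sftr_pl)
{
	mlxsw_reg_sftr_pack(sftr_pl, 0, 0,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 1, 5, true);
}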
1113/* SFDF - Switch Filtering DB Flush
1114 * --------------------------------
1115 * The switch filtering DB flush register is used to flush the FDB.
1116 * Note that FDB notifications are flushed as well.
1117 */
1118#define MLXSW_REG_SFDF_ID 0x2013
1119#define MLXSW_REG_SFDF_LEN 0x14
1120
1121static const struct mlxsw_reg_info mlxsw_reg_sfdf = {
1122 .id = MLXSW_REG_SFDF_ID,
1123 .len = MLXSW_REG_SFDF_LEN,
1124};
1125
1126/* reg_sfdf_swid
1127 * Switch partition ID.
1128 * Access: Index
1129 */
1130MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8);
1131
1132enum mlxsw_reg_sfdf_flush_type {
1133 MLXSW_REG_SFDF_FLUSH_PER_SWID,
1134 MLXSW_REG_SFDF_FLUSH_PER_FID,
1135 MLXSW_REG_SFDF_FLUSH_PER_PORT,
1136 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
1137 MLXSW_REG_SFDF_FLUSH_PER_LAG,
1138 MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
1139};
1140
1141/* reg_sfdf_flush_type
1142 * Flush type.
1143 * 0 - All SWID dynamic entries are flushed.
1144 * 1 - All FID dynamic entries are flushed.
1145 * 2 - All dynamic entries pointing to port are flushed.
1146 * 3 - All FID dynamic entries pointing to port are flushed.
1147 * 4 - All dynamic entries pointing to LAG are flushed.
1148 * 5 - All FID dynamic entries pointing to LAG are flushed.
1149 * Access: RW
1150 */
1151MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
1152
1153/* reg_sfdf_flush_static
1154 * Static.
1155 * 0 - Flush only dynamic entries.
1156 * 1 - Flush both dynamic and static entries.
1157 * Access: RW
1158 */
1159MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1);
1160
1161static inline void mlxsw_reg_sfdf_pack(char *payload,
1162 enum mlxsw_reg_sfdf_flush_type type)
1163{
1164 MLXSW_REG_ZERO(sfdf, payload);
1165 mlxsw_reg_sfdf_flush_type_set(payload, type);
1166 mlxsw_reg_sfdf_flush_static_set(payload, true);
1167}
1168
1169/* reg_sfdf_fid
1170 * FID to flush.
1171 * Access: RW
1172 */
1173MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16);
1174
1175/* reg_sfdf_system_port
1176 * Port to flush.
1177 * Access: RW
1178 */
1179MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16);
1180
1181/* reg_sfdf_port_fid_system_port
1182 * Port to flush, pointed to by FID.
1183 * Access: RW
1184 */
1185MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16);
1186
1187/* reg_sfdf_lag_id
1188 * LAG ID to flush.
1189 * Access: RW
1190 */
1191MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10);
1192
1193/* reg_sfdf_lag_fid_lag_id
1194 * LAG ID to flush, pointed to by FID.
1195 * Access: RW
1196 */
1197MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
1198
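/* Illustrative sketch only: flush the FDB entries associated with a given
 * {port, FID} pair (mlxsw_reg_sfdf_pack() above requests both dynamic and
 * static entries). The generic pack helper is combined with the per-type
 * field setters.
 */
static inline void mlxsw_reg_sfdf_example_flush_port_fid(char *sfdf_pl,
							 u8 local_port, u16 fid)
{
	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, local_port);
}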
1199/* SLDR - Switch LAG Descriptor Register
1200 * -----------------------------------------
1201 * The switch LAG descriptor register is populated by LAG descriptors.
1202 * Each LAG descriptor is indexed by lag_id. The LAG ID runs from 0 to
1203 * max_lag-1.
1204 */
1205#define MLXSW_REG_SLDR_ID 0x2014
1206#define MLXSW_REG_SLDR_LEN 0x0C /* accounts for only one port in the list */
1207
1208static const struct mlxsw_reg_info mlxsw_reg_sldr = {
1209 .id = MLXSW_REG_SLDR_ID,
1210 .len = MLXSW_REG_SLDR_LEN,
1211};
1212
1213enum mlxsw_reg_sldr_op {
1214 /* Indicates a creation of a new LAG-ID, lag_id must be valid */
1215 MLXSW_REG_SLDR_OP_LAG_CREATE,
1216 MLXSW_REG_SLDR_OP_LAG_DESTROY,
1217 /* Ports that appear in the list have the Distributor enabled */
1218 MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST,
1219 /* Removes ports from the distributor list */
1220 MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST,
1221};
1222
1223/* reg_sldr_op
1224 * Operation.
1225 * Access: RW
1226 */
1227MLXSW_ITEM32(reg, sldr, op, 0x00, 29, 3);
1228
1229/* reg_sldr_lag_id
1230 * LAG identifier. The lag_id is the index into the LAG descriptor table.
1231 * Access: Index
1232 */
1233MLXSW_ITEM32(reg, sldr, lag_id, 0x00, 0, 10);
1234
1235static inline void mlxsw_reg_sldr_lag_create_pack(char *payload, u8 lag_id)
1236{
1237 MLXSW_REG_ZERO(sldr, payload);
1238 mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_CREATE);
1239 mlxsw_reg_sldr_lag_id_set(payload, lag_id);
1240}
1241
1242static inline void mlxsw_reg_sldr_lag_destroy_pack(char *payload, u8 lag_id)
1243{
1244 MLXSW_REG_ZERO(sldr, payload);
1245 mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_DESTROY);
1246 mlxsw_reg_sldr_lag_id_set(payload, lag_id);
1247}
1248
1249/* reg_sldr_num_ports
1250 * The number of member ports of the LAG.
1251 * Reserved for Create / Destroy operations
1252 * For Add / Remove operations - indicates the number of ports in the list.
1253 * Access: RW
1254 */
1255MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8);
1256
1257/* reg_sldr_system_port
1258 * System port.
1259 * Access: RW
1260 */
1261MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false);
1262
1263static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id,
1264 u8 local_port)
1265{
1266 MLXSW_REG_ZERO(sldr, payload);
1267 mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST);
1268 mlxsw_reg_sldr_lag_id_set(payload, lag_id);
1269 mlxsw_reg_sldr_num_ports_set(payload, 1);
1270 mlxsw_reg_sldr_system_port_set(payload, 0, local_port);
1271}
1272
1273static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id,
1274 u8 local_port)
1275{
1276 MLXSW_REG_ZERO(sldr, payload);
1277 mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST);
1278 mlxsw_reg_sldr_lag_id_set(payload, lag_id);
1279 mlxsw_reg_sldr_num_ports_set(payload, 1);
1280 mlxsw_reg_sldr_system_port_set(payload, 0, local_port);
1281}
1282
1283/* SLCR - Switch LAG Configuration 2 Register
1284 * -------------------------------------------
1285 * The Switch LAG Configuration register is used for configuring the
1286 * LAG properties of the switch.
1287 */
1288#define MLXSW_REG_SLCR_ID 0x2015
1289#define MLXSW_REG_SLCR_LEN 0x10
1290
1291static const struct mlxsw_reg_info mlxsw_reg_slcr = {
1292 .id = MLXSW_REG_SLCR_ID,
1293 .len = MLXSW_REG_SLCR_LEN,
1294};
1295
1296enum mlxsw_reg_slcr_pp {
1297 /* Global Configuration (for all ports) */
1298 MLXSW_REG_SLCR_PP_GLOBAL,
1299 /* Per port configuration, based on local_port field */
1300 MLXSW_REG_SLCR_PP_PER_PORT,
1301};
1302
1303/* reg_slcr_pp
1304 * Per Port Configuration
1305 * Note: Reading at Global mode results in reading port 1 configuration.
1306 * Access: Index
1307 */
1308MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1);
1309
1310/* reg_slcr_local_port
1311 * Local port number
1312 * Supported from CPU port
1313 * Not supported from router port
1314 * Reserved when pp = Global Configuration
1315 * Access: Index
1316 */
1317MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8);
1318
1319enum mlxsw_reg_slcr_type {
1320 MLXSW_REG_SLCR_TYPE_CRC, /* default */
1321 MLXSW_REG_SLCR_TYPE_XOR,
1322 MLXSW_REG_SLCR_TYPE_RANDOM,
1323};
1324
1325/* reg_slcr_type
1326 * Hash type
1327 * Access: RW
1328 */
1329MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4);
1330
1331/* Ingress port */
1332#define MLXSW_REG_SLCR_LAG_HASH_IN_PORT BIT(0)
1333/* SMAC - for IPv4 and IPv6 packets */
1334#define MLXSW_REG_SLCR_LAG_HASH_SMAC_IP BIT(1)
1335/* SMAC - for non-IP packets */
1336#define MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP BIT(2)
1337#define MLXSW_REG_SLCR_LAG_HASH_SMAC \
1338 (MLXSW_REG_SLCR_LAG_HASH_SMAC_IP | \
1339 MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP)
1340/* DMAC - for IPv4 and IPv6 packets */
1341#define MLXSW_REG_SLCR_LAG_HASH_DMAC_IP BIT(3)
1342/* DMAC - for non-IP packets */
1343#define MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP BIT(4)
1344#define MLXSW_REG_SLCR_LAG_HASH_DMAC \
1345 (MLXSW_REG_SLCR_LAG_HASH_DMAC_IP | \
1346 MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP)
1347/* Ethertype - for IPv4 and IPv6 packets */
1348#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP BIT(5)
1349/* Ethertype - for non-IP packets */
1350#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP BIT(6)
1351#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE \
1352 (MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP | \
1353 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP)
1354/* VLAN ID - for IPv4 and IPv6 packets */
1355#define MLXSW_REG_SLCR_LAG_HASH_VLANID_IP BIT(7)
1356/* VLAN ID - for non-IP packets */
1357#define MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP BIT(8)
1358#define MLXSW_REG_SLCR_LAG_HASH_VLANID \
1359 (MLXSW_REG_SLCR_LAG_HASH_VLANID_IP | \
1360 MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP)
1361/* Source IP address (can be IPv4 or IPv6) */
1362#define MLXSW_REG_SLCR_LAG_HASH_SIP BIT(9)
1363/* Destination IP address (can be IPv4 or IPv6) */
1364#define MLXSW_REG_SLCR_LAG_HASH_DIP BIT(10)
1365/* TCP/UDP source port */
1366#define MLXSW_REG_SLCR_LAG_HASH_SPORT BIT(11)
1367/* TCP/UDP destination port*/
1368#define MLXSW_REG_SLCR_LAG_HASH_DPORT BIT(12)
1369/* IPv4 Protocol/IPv6 Next Header */
1370#define MLXSW_REG_SLCR_LAG_HASH_IPPROTO BIT(13)
1371/* IPv6 Flow label */
1372#define MLXSW_REG_SLCR_LAG_HASH_FLOWLABEL BIT(14)
1373/* SID - FCoE source ID */
1374#define MLXSW_REG_SLCR_LAG_HASH_FCOE_SID BIT(15)
1375/* DID - FCoE destination ID */
1376#define MLXSW_REG_SLCR_LAG_HASH_FCOE_DID BIT(16)
1377/* OXID - FCoE originator exchange ID */
1378#define MLXSW_REG_SLCR_LAG_HASH_FCOE_OXID BIT(17)
1379/* Destination QP number - for RoCE packets */
1380#define MLXSW_REG_SLCR_LAG_HASH_ROCE_DQP BIT(19)
1381
1382/* reg_slcr_lag_hash
1383 * LAG hashing configuration. This is a bitmask, in which each set
1384 * bit includes the corresponding item in the LAG hash calculation.
1385 * The default lag_hash contains SMAC, DMAC, VLANID and
1386 * Ethertype (for all packet types).
1387 * Access: RW
1388 */
1389MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20);
1390
1391static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
1392{
1393 MLXSW_REG_ZERO(slcr, payload);
1394 mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
1395 mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR);
1396 mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
1397}
1398
1399/* SLCOR - Switch LAG Collector Register
1400 * -------------------------------------
1401 * The Switch LAG Collector register controls the Local Port membership
1402 * in a LAG and enablement of the collector.
1403 */
1404#define MLXSW_REG_SLCOR_ID 0x2016
1405#define MLXSW_REG_SLCOR_LEN 0x10
1406
1407static const struct mlxsw_reg_info mlxsw_reg_slcor = {
1408 .id = MLXSW_REG_SLCOR_ID,
1409 .len = MLXSW_REG_SLCOR_LEN,
1410};
1411
1412enum mlxsw_reg_slcor_col {
1413 /* Port is added with collector disabled */
1414 MLXSW_REG_SLCOR_COL_LAG_ADD_PORT,
1415 MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED,
1416 MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED,
1417 MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT,
1418};
1419
1420/* reg_slcor_col
1421 * Collector configuration
1422 * Access: RW
1423 */
1424MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2);
1425
1426/* reg_slcor_local_port
1427 * Local port number
1428 * Not supported for CPU port
1429 * Access: Index
1430 */
1431MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8);
1432
1433/* reg_slcor_lag_id
1434 * LAG Identifier. Index into the LAG descriptor table.
1435 * Access: Index
1436 */
1437MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10);
1438
1439/* reg_slcor_port_index
1440 * Port index in the LAG list. Only valid on Add Port to LAG col.
1441 * Valid range is from 0 to cap_max_lag_members-1
1442 * Access: RW
1443 */
1444MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10);
1445
1446static inline void mlxsw_reg_slcor_pack(char *payload,
1447 u8 local_port, u16 lag_id,
1448 enum mlxsw_reg_slcor_col col)
1449{
1450 MLXSW_REG_ZERO(slcor, payload);
1451 mlxsw_reg_slcor_col_set(payload, col);
1452 mlxsw_reg_slcor_local_port_set(payload, local_port);
1453 mlxsw_reg_slcor_lag_id_set(payload, lag_id);
1454}
1455
1456static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
1457 u8 local_port, u16 lag_id,
1458 u8 port_index)
1459{
1460 mlxsw_reg_slcor_pack(payload, local_port, lag_id,
1461 MLXSW_REG_SLCOR_COL_LAG_ADD_PORT);
1462 mlxsw_reg_slcor_port_index_set(payload, port_index);
1463}
1464
1465static inline void mlxsw_reg_slcor_port_remove_pack(char *payload,
1466 u8 local_port, u16 lag_id)
1467{
1468 mlxsw_reg_slcor_pack(payload, local_port, lag_id,
1469 MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT);
1470}
1471
1472static inline void mlxsw_reg_slcor_col_enable_pack(char *payload,
1473 u8 local_port, u16 lag_id)
1474{
1475 mlxsw_reg_slcor_pack(payload, local_port, lag_id,
1476 MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
1477}
1478
1479static inline void mlxsw_reg_slcor_col_disable_pack(char *payload,
1480 u8 local_port, u16 lag_id)
1481{
1482 mlxsw_reg_slcor_pack(payload, local_port, lag_id,
1483 MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED);
1484}
1485
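/* Illustrative sketch only: the usual LAG bring-up order is SLDR (add the
 * port to the LAG distributor) followed by SLCOR (add the port to the
 * collector), each issued as its own register write. This helper only packs
 * the two payloads; lag_id, local_port and port_index are caller inputs.
 */
static inline void mlxsw_reg_example_lag_port_join(char *sldr_pl,
						   char *slcor_pl, u8 lag_id,
						   u8 local_port,
						   u8 port_index)
{
	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, local_port);
	mlxsw_reg_slcor_port_add_pack(slcor_pl, local_port, lag_id, port_index);
}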
1486/* SPMLR - Switch Port MAC Learning Register
1487 * -----------------------------------------
1488 * Controls the Switch MAC learning policy per port.
1489 */
1490#define MLXSW_REG_SPMLR_ID 0x2018
1491#define MLXSW_REG_SPMLR_LEN 0x8
1492
1493static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
1494 .id = MLXSW_REG_SPMLR_ID,
1495 .len = MLXSW_REG_SPMLR_LEN,
1496};
1497
1498/* reg_spmlr_local_port
1499 * Local port number.
1500 * Access: Index
1501 */
1502MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
1503
1504/* reg_spmlr_sub_port
1505 * Virtual port within the physical port.
1506 * Should be set to 0 when virtual ports are not enabled on the port.
1507 * Access: Index
1508 */
1509MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
1510
1511enum mlxsw_reg_spmlr_learn_mode {
1512 MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
1513 MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
1514 MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
1515};
1516
1517/* reg_spmlr_learn_mode
1518 * Learning mode on the port.
1519 * 0 - Learning disabled.
1520 * 2 - Learning enabled.
1521 * 3 - Security mode.
1522 *
1523 * In security mode the switch does not learn MACs on the port, but uses the
1524 * SMAC to see if it exists on another ingress port. If so, the packet is
1525 * classified as a bad packet and is discarded unless the software registers
1526 * to receive port security error packets usign HPKT.
1527 */
1528MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
1529
1530static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
1531 enum mlxsw_reg_spmlr_learn_mode mode)
1532{
1533 MLXSW_REG_ZERO(spmlr, payload);
1534 mlxsw_reg_spmlr_local_port_set(payload, local_port);
1535 mlxsw_reg_spmlr_sub_port_set(payload, 0);
1536 mlxsw_reg_spmlr_learn_mode_set(payload, mode);
1537}
1538
1539/* SVFA - Switch VID to FID Allocation Register
1540 * --------------------------------------------
1541 * Controls the VID to FID mapping and {Port, VID} to FID mapping for
1542 * virtualized ports.
1543 */
1544#define MLXSW_REG_SVFA_ID 0x201C
1545#define MLXSW_REG_SVFA_LEN 0x10
1546
1547static const struct mlxsw_reg_info mlxsw_reg_svfa = {
1548 .id = MLXSW_REG_SVFA_ID,
1549 .len = MLXSW_REG_SVFA_LEN,
1550};
1551
1552/* reg_svfa_swid
1553 * Switch partition ID.
1554 * Access: Index
1555 */
1556MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
1557
1558/* reg_svfa_local_port
1559 * Local port number.
1560 * Access: Index
1561 *
1562 * Note: Reserved for 802.1Q FIDs.
1563 */
1564MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
1565
1566enum mlxsw_reg_svfa_mt {
1567 MLXSW_REG_SVFA_MT_VID_TO_FID,
1568 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
1569};
1570
1571/* reg_svfa_mapping_table
1572 * Mapping table:
1573 * 0 - VID to FID
1574 * 1 - {Port, VID} to FID
1575 * Access: Index
1576 *
1577 * Note: Reserved for SwitchX-2.
1578 */
1579MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3);
1580
1581/* reg_svfa_v
1582 * Valid.
1583 * Valid if set.
1584 * Access: RW
1585 *
1586 * Note: Reserved for SwitchX-2.
1587 */
1588MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1);
1589
1590/* reg_svfa_fid
1591 * Filtering ID.
1592 * Access: RW
1593 */
1594MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16);
1595
1596/* reg_svfa_vid
1597 * VLAN ID.
1598 * Access: Index
1599 */
1600MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12);
1601
1602/* reg_svfa_counter_set_type
1603 * Counter set type for flow counters.
1604 * Access: RW
1605 *
1606 * Note: Reserved for SwitchX-2.
1607 */
1608MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
1609
1610/* reg_svfa_counter_index
1611 * Counter index for flow counters.
1612 * Access: RW
1613 *
1614 * Note: Reserved for SwitchX-2.
1615 */
1616MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
1617
1618static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
1619 enum mlxsw_reg_svfa_mt mt, bool valid,
1620 u16 fid, u16 vid)
1621{
1622 MLXSW_REG_ZERO(svfa, payload);
1623 local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
1624 mlxsw_reg_svfa_swid_set(payload, 0);
1625 mlxsw_reg_svfa_local_port_set(payload, local_port);
1626 mlxsw_reg_svfa_mapping_table_set(payload, mt);
1627 mlxsw_reg_svfa_v_set(payload, valid);
1628 mlxsw_reg_svfa_fid_set(payload, fid);
1629 mlxsw_reg_svfa_vid_set(payload, vid);
1630}
1631
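/* Usage sketch (illustrative only): mapping {Port, VID} to a FID for a
 * virtualized port using the pack helper above. Issuing the register write
 * (e.g. mlxsw_reg_write() from core.h) is assumed and not shown; the
 * _example_ helper name is hypothetical.
 */
static inline void mlxsw_reg_svfa_example_map(u8 local_port, u16 vid, u16 fid)
{
        char svfa_pl[MLXSW_REG_SVFA_LEN];

        /* Valid bit set - create the mapping; pass false to remove it. */
        mlxsw_reg_svfa_pack(svfa_pl, local_port,
                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, true,
                            fid, vid);
}
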
Ido Schimmel1f65da72015-10-16 14:01:34 +02001632/* SVPE - Switch Virtual-Port Enabling Register
1633 * --------------------------------------------
1634 * Enables port virtualization.
1635 */
1636#define MLXSW_REG_SVPE_ID 0x201E
1637#define MLXSW_REG_SVPE_LEN 0x4
1638
1639static const struct mlxsw_reg_info mlxsw_reg_svpe = {
1640 .id = MLXSW_REG_SVPE_ID,
1641 .len = MLXSW_REG_SVPE_LEN,
1642};
1643
1644/* reg_svpe_local_port
1645 * Local port number
1646 * Access: Index
1647 *
1648 * Note: CPU port is not supported (uses VLAN mode only).
1649 */
1650MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
1651
1652/* reg_svpe_vp_en
1653 * Virtual port enable.
1654 * 0 - Disable, VLAN mode (VID to FID).
1655 * 1 - Enable, Virtual port mode ({Port, VID} to FID).
1656 * Access: RW
1657 */
1658MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
1659
1660static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
1661 bool enable)
1662{
1663 MLXSW_REG_ZERO(svpe, payload);
1664 mlxsw_reg_svpe_local_port_set(payload, local_port);
1665 mlxsw_reg_svpe_vp_en_set(payload, enable);
1666}
1667
Ido Schimmelf1fb6932015-10-16 14:01:32 +02001668/* SFMR - Switch FID Management Register
1669 * -------------------------------------
1670 * Creates and configures FIDs.
1671 */
1672#define MLXSW_REG_SFMR_ID 0x201F
1673#define MLXSW_REG_SFMR_LEN 0x18
1674
1675static const struct mlxsw_reg_info mlxsw_reg_sfmr = {
1676 .id = MLXSW_REG_SFMR_ID,
1677 .len = MLXSW_REG_SFMR_LEN,
1678};
1679
1680enum mlxsw_reg_sfmr_op {
1681 MLXSW_REG_SFMR_OP_CREATE_FID,
1682 MLXSW_REG_SFMR_OP_DESTROY_FID,
1683};
1684
1685/* reg_sfmr_op
1686 * Operation.
1687 * 0 - Create or edit FID.
1688 * 1 - Destroy FID.
1689 * Access: WO
1690 */
1691MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
1692
1693/* reg_sfmr_fid
1694 * Filtering ID.
1695 * Access: Index
1696 */
1697MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
1698
1699/* reg_sfmr_fid_offset
1700 * FID offset.
1701 * Used to point into the flooding table selected by SFGC register if
1702 * the table is of type FID-Offset. Otherwise, this field is reserved.
1703 * Access: RW
1704 */
1705MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
1706
1707/* reg_sfmr_vtfp
1708 * Valid Tunnel Flood Pointer.
1709 * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL.
1710 * Access: RW
1711 *
1712 * Note: Reserved for 802.1Q FIDs.
1713 */
1714MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1);
1715
1716/* reg_sfmr_nve_tunnel_flood_ptr
1717 * Underlay Flooding and BC Pointer.
1718 * Used as a pointer to the first entry of the group based link lists of
1719 * flooding or BC entries (for NVE tunnels).
1720 * Access: RW
1721 */
1722MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24);
1723
1724/* reg_sfmr_vv
1725 * VNI Valid.
1726 * If not set, then vni is reserved.
1727 * Access: RW
1728 *
1729 * Note: Reserved for 802.1Q FIDs.
1730 */
1731MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
1732
1733/* reg_sfmr_vni
1734 * Virtual Network Identifier.
1735 * Access: RW
1736 *
1737 * Note: A given VNI can only be assigned to one FID.
1738 */
1739MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
1740
1741static inline void mlxsw_reg_sfmr_pack(char *payload,
1742 enum mlxsw_reg_sfmr_op op, u16 fid,
1743 u16 fid_offset)
1744{
1745 MLXSW_REG_ZERO(sfmr, payload);
1746 mlxsw_reg_sfmr_op_set(payload, op);
1747 mlxsw_reg_sfmr_fid_set(payload, fid);
1748 mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
1749 mlxsw_reg_sfmr_vtfp_set(payload, false);
1750 mlxsw_reg_sfmr_vv_set(payload, false);
1751}
1752
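/* Usage sketch (illustrative only): creating a FID with SFMR. A FID used
 * with a FID-offset flood table would normally pass the offset computed by
 * the driver; 0 is used here only as a placeholder. The register write is
 * assumed and not shown; the _example_ helper name is hypothetical.
 */
static inline void mlxsw_reg_sfmr_example_create_fid(u16 fid)
{
        char sfmr_pl[MLXSW_REG_SFMR_LEN];

        mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
        /* Destroying the FID later reuses the same pack helper with
         * MLXSW_REG_SFMR_OP_DESTROY_FID.
         */
}
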
Ido Schimmela4feea72015-10-16 14:01:36 +02001753/* SPVMLR - Switch Port VLAN MAC Learning Register
1754 * -----------------------------------------------
1755 * Controls the switch MAC learning policy per {Port, VID}.
1756 */
1757#define MLXSW_REG_SPVMLR_ID 0x2020
1758#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
1759#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
1760#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
1761#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
1762 MLXSW_REG_SPVMLR_REC_LEN * \
1763 MLXSW_REG_SPVMLR_REC_MAX_COUNT)
1764
1765static const struct mlxsw_reg_info mlxsw_reg_spvmlr = {
1766 .id = MLXSW_REG_SPVMLR_ID,
1767 .len = MLXSW_REG_SPVMLR_LEN,
1768};
1769
1770/* reg_spvmlr_local_port
1771 * Local ingress port.
1772 * Access: Index
1773 *
1774 * Note: CPU port is not supported.
1775 */
1776MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
1777
1778/* reg_spvmlr_num_rec
1779 * Number of records to update.
1780 * Access: OP
1781 */
1782MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8);
1783
1784/* reg_spvmlr_rec_learn_enable
1785 * 0 - Disable learning for {Port, VID}.
1786 * 1 - Enable learning for {Port, VID}.
1787 * Access: RW
1788 */
1789MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
1790 31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
1791
1792/* reg_spvmlr_rec_vid
1793 * VLAN ID to be added/removed from port or for querying.
1794 * Access: Index
1795 */
1796MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
1797 MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
1798
1799static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
1800 u16 vid_begin, u16 vid_end,
1801 bool learn_enable)
1802{
1803 int num_rec = vid_end - vid_begin + 1;
1804 int i;
1805
1806 WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT);
1807
1808 MLXSW_REG_ZERO(spvmlr, payload);
1809 mlxsw_reg_spvmlr_local_port_set(payload, local_port);
1810 mlxsw_reg_spvmlr_num_rec_set(payload, num_rec);
1811
1812 for (i = 0; i < num_rec; i++) {
1813 mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable);
1814 mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i);
1815 }
1816}
1817
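/* Usage sketch (illustrative only): disabling learning on a range of
 * {Port, VID} pairs. The payload can hold up to 256 records (about 1KB),
 * so a caller-provided buffer of MLXSW_REG_SPVMLR_LEN bytes, typically
 * heap-allocated, is assumed here rather than a stack array. The _example_
 * helper name is hypothetical.
 */
static inline void mlxsw_reg_spvmlr_example_disable_learning(char *spvmlr_pl,
                                                             u8 local_port,
                                                             u16 vid_begin,
                                                             u16 vid_end)
{
        /* One record per VID in [vid_begin, vid_end], all with learning
         * disabled.
         */
        mlxsw_reg_spvmlr_pack(spvmlr_pl, local_port, vid_begin, vid_end,
                              false);
}
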
Ido Schimmel2c63a552016-04-06 17:10:07 +02001818/* QTCT - QoS Switch Traffic Class Table
1819 * -------------------------------------
1820 * Configures the mapping between the packet switch priority and the
1821 * traffic class on the transmit port.
1822 */
1823#define MLXSW_REG_QTCT_ID 0x400A
1824#define MLXSW_REG_QTCT_LEN 0x08
1825
1826static const struct mlxsw_reg_info mlxsw_reg_qtct = {
1827 .id = MLXSW_REG_QTCT_ID,
1828 .len = MLXSW_REG_QTCT_LEN,
1829};
1830
1831/* reg_qtct_local_port
1832 * Local port number.
1833 * Access: Index
1834 *
1835 * Note: CPU port is not supported.
1836 */
1837MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
1838
1839/* reg_qtct_sub_port
1840 * Virtual port within the physical port.
1841 * Should be set to 0 when virtual ports are not enabled on the port.
1842 * Access: Index
1843 */
1844MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8);
1845
1846/* reg_qtct_switch_prio
1847 * Switch priority.
1848 * Access: Index
1849 */
1850MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
1851
1852/* reg_qtct_tclass
1853 * Traffic class.
1854 * Default values:
1855 * switch_prio 0 : tclass 1
1856 * switch_prio 1 : tclass 0
1857 * switch_prio i : tclass i, for i > 1
1858 * Access: RW
1859 */
1860MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
1861
1862static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
1863 u8 switch_prio, u8 tclass)
1864{
1865 MLXSW_REG_ZERO(qtct, payload);
1866 mlxsw_reg_qtct_local_port_set(payload, local_port);
1867 mlxsw_reg_qtct_switch_prio_set(payload, switch_prio);
1868 mlxsw_reg_qtct_tclass_set(payload, tclass);
1869}
1870
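/* Usage sketch (illustrative only): an identity mapping of the eight switch
 * priorities to traffic classes, one QTCT payload per priority. Writing
 * each payload to the device (e.g. mlxsw_reg_write() from core.h) between
 * iterations is assumed and not shown; the _example_ helper name is
 * hypothetical.
 */
static inline void mlxsw_reg_qtct_example_identity_map(u8 local_port)
{
        char qtct_pl[MLXSW_REG_QTCT_LEN];
        int prio;

        for (prio = 0; prio < 8; prio++) {
                mlxsw_reg_qtct_pack(qtct_pl, local_port, prio, prio);
                /* qtct_pl would be written to the device here before being
                 * repacked for the next priority.
                 */
        }
}
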
Ido Schimmelb9b7cee2016-04-06 17:10:06 +02001871/* QEEC - QoS ETS Element Configuration Register
1872 * ---------------------------------------------
1873 * Configures the ETS elements.
1874 */
1875#define MLXSW_REG_QEEC_ID 0x400D
1876#define MLXSW_REG_QEEC_LEN 0x1C
1877
1878static const struct mlxsw_reg_info mlxsw_reg_qeec = {
1879 .id = MLXSW_REG_QEEC_ID,
1880 .len = MLXSW_REG_QEEC_LEN,
1881};
1882
1883/* reg_qeec_local_port
1884 * Local port number.
1885 * Access: Index
1886 *
1887 * Note: CPU port is supported.
1888 */
1889MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
1890
1891enum mlxsw_reg_qeec_hr {
1892 MLXSW_REG_QEEC_HIERARCY_PORT,
1893 MLXSW_REG_QEEC_HIERARCY_GROUP,
1894 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
1895 MLXSW_REG_QEEC_HIERARCY_TC,
1896};
1897
1898/* reg_qeec_element_hierarchy
1899 * 0 - Port
1900 * 1 - Group
1901 * 2 - Subgroup
1902 * 3 - Traffic Class
1903 * Access: Index
1904 */
1905MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4);
1906
1907/* reg_qeec_element_index
1908 * The index of the element in the hierarchy.
1909 * Access: Index
1910 */
1911MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
1912
1913/* reg_qeec_next_element_index
1914 * The index of the next (lower) element in the hierarchy.
1915 * Access: RW
1916 *
1917 * Note: Reserved for element_hierarchy 0.
1918 */
1919MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
1920
1921enum {
1922 MLXSW_REG_QEEC_BYTES_MODE,
1923 MLXSW_REG_QEEC_PACKETS_MODE,
1924};
1925
1926/* reg_qeec_pb
1927 * Packets or bytes mode.
1928 * 0 - Bytes mode
1929 * 1 - Packets mode
1930 * Access: RW
1931 *
1932 * Note: Used for max shaper configuration. For Spectrum, packets mode
1933 * is supported only for traffic classes of CPU port.
1934 */
1935MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
1936
1937/* reg_qeec_mase
1938 * Max shaper configuration enable. Enables configuration of the max
1939 * shaper on this ETS element.
1940 * 0 - Disable
1941 * 1 - Enable
1942 * Access: RW
1943 */
1944MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
1945
1946/* A large max rate will disable the max shaper. */
1947#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */
1948
1949/* reg_qeec_max_shaper_rate
1950 * Max shaper information rate.
1951 * For CPU port, can only be configured for port hierarchy.
1952 * When in bytes mode, value is specified in units of 1000bps.
1953 * Access: RW
1954 */
1955MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
1956
1957/* reg_qeec_de
1958 * DWRR configuration enable. Enables configuration of the dwrr and
1959 * dwrr_weight.
1960 * 0 - Disable
1961 * 1 - Enable
1962 * Access: RW
1963 */
1964MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1);
1965
1966/* reg_qeec_dwrr
1967 * Transmission selection algorithm to use on the link going down from
1968 * the ETS element.
1969 * 0 - Strict priority
1970 * 1 - DWRR
1971 * Access: RW
1972 */
1973MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
1974
1975/* reg_qeec_dwrr_weight
1976 * DWRR weight on the link going down from the ETS element. The
1977 * percentage of bandwidth guaranteed to an ETS element within
1978 * its hierarchy. The sum of all weights across all ETS elements
1979 * within one hierarchy should be equal to 100. Reserved when
1980 * transmission selection algorithm is strict priority.
1981 * Access: RW
1982 */
1983MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
1984
1985static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
1986 enum mlxsw_reg_qeec_hr hr, u8 index,
1987 u8 next_index)
1988{
1989 MLXSW_REG_ZERO(qeec, payload);
1990 mlxsw_reg_qeec_local_port_set(payload, local_port);
1991 mlxsw_reg_qeec_element_hierarchy_set(payload, hr);
1992 mlxsw_reg_qeec_element_index_set(payload, index);
1993 mlxsw_reg_qeec_next_element_index_set(payload, next_index);
1994}
1995
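/* Usage sketch (illustrative only): limiting a traffic class with the max
 * shaper. The pack helper above only fills in the ETS element addressing;
 * the shaper fields are set explicitly afterwards. Mapping the TC to a
 * subgroup of the same index mirrors a common driver layout but is an
 * assumption here, and the register write is not shown. The _example_
 * helper name is hypothetical.
 */
static inline void mlxsw_reg_qeec_example_tc_shaper(u8 local_port, u8 tclass,
                                                    u32 rate_kbps)
{
        char qeec_pl[MLXSW_REG_QEEC_LEN];

        mlxsw_reg_qeec_pack(qeec_pl, local_port, MLXSW_REG_QEEC_HIERARCY_TC,
                            tclass, tclass);
        mlxsw_reg_qeec_mase_set(qeec_pl, true);
        /* Bytes mode (default after MLXSW_REG_ZERO); rate is in units of
         * 1000 bps per the max_shaper_rate field above.
         */
        mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, rate_kbps);
}
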
Ido Schimmel4ec14b72015-07-29 23:33:48 +02001996/* PMLP - Ports Module to Local Port Register
1997 * ------------------------------------------
1998 * Configures the assignment of modules to local ports.
1999 */
2000#define MLXSW_REG_PMLP_ID 0x5002
2001#define MLXSW_REG_PMLP_LEN 0x40
2002
2003static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
2004 .id = MLXSW_REG_PMLP_ID,
2005 .len = MLXSW_REG_PMLP_LEN,
2006};
2007
2008/* reg_pmlp_rxtx
2009 * 0 - Tx value is used for both Tx and Rx.
2010 * 1 - Rx value is taken from a separate field.
2011 * Access: RW
2012 */
2013MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
2014
2015/* reg_pmlp_local_port
2016 * Local port number.
2017 * Access: Index
2018 */
2019MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
2020
2021/* reg_pmlp_width
2022 * 0 - Unmap local port.
2023 * 1 - Lane 0 is used.
2024 * 2 - Lanes 0 and 1 are used.
2025 * 4 - Lanes 0, 1, 2 and 3 are used.
2026 * Access: RW
2027 */
2028MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
2029
2030/* reg_pmlp_module
2031 * Module number.
2032 * Access: RW
2033 */
Ido Schimmelbbeeda22016-01-27 15:20:26 +01002034MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002035
2036/* reg_pmlp_tx_lane
2037 * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
2038 * Access: RW
2039 */
Ido Schimmelbbeeda22016-01-27 15:20:26 +01002040MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002041
2042/* reg_pmlp_rx_lane
2043 * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
2044 * equal to Tx lane.
2045 * Access: RW
2046 */
Ido Schimmelbbeeda22016-01-27 15:20:26 +01002047MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002048
2049static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
2050{
2051 MLXSW_REG_ZERO(pmlp, payload);
2052 mlxsw_reg_pmlp_local_port_set(payload, local_port);
2053}
2054
2055/* PMTU - Port MTU Register
2056 * ------------------------
2057 * Configures and reports the port MTU.
2058 */
2059#define MLXSW_REG_PMTU_ID 0x5003
2060#define MLXSW_REG_PMTU_LEN 0x10
2061
2062static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
2063 .id = MLXSW_REG_PMTU_ID,
2064 .len = MLXSW_REG_PMTU_LEN,
2065};
2066
2067/* reg_pmtu_local_port
2068 * Local port number.
2069 * Access: Index
2070 */
2071MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
2072
2073/* reg_pmtu_max_mtu
2074 * Maximum MTU.
2075 * When port type (e.g. Ethernet) is configured, the relevant MTU is
2076 * reported, otherwise the minimum between the max_mtu of the different
2077 * types is reported.
2078 * Access: RO
2079 */
2080MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
2081
2082/* reg_pmtu_admin_mtu
2083 * MTU value to set port to. Must be smaller or equal to max_mtu.
2084 * Note: If port type is Infiniband, then port must be disabled, when its
2085 * MTU is set.
2086 * Access: RW
2087 */
2088MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
2089
2090/* reg_pmtu_oper_mtu
2091 * The actual MTU configured on the port. Packets exceeding this size
2092 * will be dropped.
2093 * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband
2094 * oper_mtu might be smaller than admin_mtu.
2095 * Access: RO
2096 */
2097MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
2098
2099static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
2100 u16 new_mtu)
2101{
2102 MLXSW_REG_ZERO(pmtu, payload);
2103 mlxsw_reg_pmtu_local_port_set(payload, local_port);
2104 mlxsw_reg_pmtu_max_mtu_set(payload, 0);
2105 mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
2106 mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
2107}
2108
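/* Usage sketch (illustrative only): setting the port MTU. In practice the
 * driver would first query PMTU to learn max_mtu and validate new_mtu
 * against it; both the query and the final write (core.h helpers) are
 * assumed and not shown. The _example_ helper name is hypothetical.
 */
static inline void mlxsw_reg_pmtu_example_set(u8 local_port, u16 new_mtu)
{
        char pmtu_pl[MLXSW_REG_PMTU_LEN];

        /* max_mtu and oper_mtu are read-only and packed as 0. */
        mlxsw_reg_pmtu_pack(pmtu_pl, local_port, new_mtu);
}
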
2109/* PTYS - Port Type and Speed Register
2110 * -----------------------------------
2111 * Configures and reports the port speed type.
2112 *
2113 * Note: When set while the link is up, the changes will not take effect
2114 * until the port transitions from down to up state.
2115 */
2116#define MLXSW_REG_PTYS_ID 0x5004
2117#define MLXSW_REG_PTYS_LEN 0x40
2118
2119static const struct mlxsw_reg_info mlxsw_reg_ptys = {
2120 .id = MLXSW_REG_PTYS_ID,
2121 .len = MLXSW_REG_PTYS_LEN,
2122};
2123
2124/* reg_ptys_local_port
2125 * Local port number.
2126 * Access: Index
2127 */
2128MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
2129
2130#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
2131
2132/* reg_ptys_proto_mask
2133 * Protocol mask. Indicates which protocol is used.
2134 * 0 - Infiniband.
2135 * 1 - Fibre Channel.
2136 * 2 - Ethernet.
2137 * Access: Index
2138 */
2139MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
2140
Ido Schimmel4149b972016-09-12 13:26:24 +02002141enum {
2142 MLXSW_REG_PTYS_AN_STATUS_NA,
2143 MLXSW_REG_PTYS_AN_STATUS_OK,
2144 MLXSW_REG_PTYS_AN_STATUS_FAIL,
2145};
2146
2147/* reg_ptys_an_status
2148 * Autonegotiation status.
2149 * Access: RO
2150 */
2151MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
2152
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002153#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
2154#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
2155#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
2156#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
2157#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
2158#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
2159#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
2160#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
2161#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8)
2162#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
2163#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13)
2164#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
2165#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
2166#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
2167#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
2168#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
2169#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
2170#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
2171#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
2172#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24)
2173#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25)
2174#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26)
2175#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
2176#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
2177#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
2178#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 BIT(30)
2179#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2 BIT(31)
2180
2181/* reg_ptys_eth_proto_cap
2182 * Ethernet port supported speeds and protocols.
2183 * Access: RO
2184 */
2185MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
2186
2187/* reg_ptys_eth_proto_admin
2188 * Speed and protocol to set port to.
2189 * Access: RW
2190 */
2191MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
2192
2193/* reg_ptys_eth_proto_oper
2194 * The current speed and protocol configured for the port.
2195 * Access: RO
2196 */
2197MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
2198
Ido Schimmel4149b972016-09-12 13:26:24 +02002199/* reg_ptys_eth_proto_lp_advertise
2200 * The protocols that were advertised by the link partner during
2201 * autonegotiation.
2202 * Access: RO
2203 */
2204MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
2205
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002206static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
2207 u32 proto_admin)
2208{
2209 MLXSW_REG_ZERO(ptys, payload);
2210 mlxsw_reg_ptys_local_port_set(payload, local_port);
2211 mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
2212 mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
2213}
2214
2215static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
2216 u32 *p_eth_proto_adm,
2217 u32 *p_eth_proto_oper)
2218{
2219 if (p_eth_proto_cap)
2220 *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
2221 if (p_eth_proto_adm)
2222 *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
2223 if (p_eth_proto_oper)
2224 *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
2225}
2226
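/* Usage sketch (illustrative only): interpreting a PTYS payload that is
 * assumed to have already been filled by a query (e.g. mlxsw_reg_query()
 * from core.h, not shown). Returns true if the port currently operates at
 * one of the 40 Gb/s link modes. The _example_ helper name is hypothetical.
 */
static inline bool mlxsw_reg_ptys_example_oper_is_40g(char *ptys_pl)
{
        u32 eth_proto_oper;

        mlxsw_reg_ptys_unpack(ptys_pl, NULL, NULL, &eth_proto_oper);
        return eth_proto_oper & (MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
                                 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
                                 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
                                 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4);
}
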
2227/* PPAD - Port Physical Address Register
2228 * -------------------------------------
2229 * The PPAD register configures the per port physical MAC address.
2230 */
2231#define MLXSW_REG_PPAD_ID 0x5005
2232#define MLXSW_REG_PPAD_LEN 0x10
2233
2234static const struct mlxsw_reg_info mlxsw_reg_ppad = {
2235 .id = MLXSW_REG_PPAD_ID,
2236 .len = MLXSW_REG_PPAD_LEN,
2237};
2238
2239/* reg_ppad_single_base_mac
2240 * 0 - base_mac: local_port should be set to 0 and mac[7:0] is
2241 *     reserved; HW assigns incremental MAC addresses per port.
2242 * 1 - single_mac: the MAC address of the local_port.
2243 * Access: RW
2244 */
2245MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
2246
2247/* reg_ppad_local_port
2248 * port number, if single_base_mac = 0 then local_port is reserved
2249 * Access: RW
2250 */
2251MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
2252
2253/* reg_ppad_mac
2254 * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
2255 * If single_base_mac = 1 - the per port MAC address
2256 * Access: RW
2257 */
2258MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
2259
2260static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
2261 u8 local_port)
2262{
2263 MLXSW_REG_ZERO(ppad, payload);
2264 mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
2265 mlxsw_reg_ppad_local_port_set(payload, local_port);
2266}
2267
2268/* PAOS - Ports Administrative and Operational Status Register
2269 * -----------------------------------------------------------
2270 * Configures and retrieves per port administrative and operational status.
2271 */
2272#define MLXSW_REG_PAOS_ID 0x5006
2273#define MLXSW_REG_PAOS_LEN 0x10
2274
2275static const struct mlxsw_reg_info mlxsw_reg_paos = {
2276 .id = MLXSW_REG_PAOS_ID,
2277 .len = MLXSW_REG_PAOS_LEN,
2278};
2279
2280/* reg_paos_swid
2281 * Switch partition ID with which to associate the port.
2282 * Note: while external ports use unique local port numbers (and thus swid is
2283 * redundant), router ports use the same local port number where swid is the
2284 * only indication for the relevant port.
2285 * Access: Index
2286 */
2287MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
2288
2289/* reg_paos_local_port
2290 * Local port number.
2291 * Access: Index
2292 */
2293MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
2294
2295/* reg_paos_admin_status
2296 * Port administrative state (the desired state of the port):
2297 * 1 - Up.
2298 * 2 - Down.
2299 * 3 - Up once. This means that in case of link failure, the port won't go
2300 * into polling mode, but will wait to be re-enabled by software.
2301 * 4 - Disabled by system. Can only be set by hardware.
2302 * Access: RW
2303 */
2304MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
2305
2306/* reg_paos_oper_status
2307 * Port operational state (the current state):
2308 * 1 - Up.
2309 * 2 - Down.
2310 * 3 - Down by port failure. This means that the device will not let the
2311 * port up again until explicitly specified by software.
2312 * Access: RO
2313 */
2314MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
2315
2316/* reg_paos_ase
2317 * Admin state update enabled.
2318 * Access: WO
2319 */
2320MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
2321
2322/* reg_paos_ee
2323 * Event update enable. If this bit is set, event generation will be
2324 * updated based on the e field.
2325 * Access: WO
2326 */
2327MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
2328
2329/* reg_paos_e
2330 * Event generation on operational state change:
2331 * 0 - Do not generate event.
2332 * 1 - Generate Event.
2333 * 2 - Generate Single Event.
2334 * Access: RW
2335 */
2336MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
2337
2338static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
2339 enum mlxsw_port_admin_status status)
2340{
2341 MLXSW_REG_ZERO(paos, payload);
2342 mlxsw_reg_paos_swid_set(payload, 0);
2343 mlxsw_reg_paos_local_port_set(payload, local_port);
2344 mlxsw_reg_paos_admin_status_set(payload, status);
2345 mlxsw_reg_paos_oper_status_set(payload, 0);
2346 mlxsw_reg_paos_ase_set(payload, 1);
2347 mlxsw_reg_paos_ee_set(payload, 1);
2348 mlxsw_reg_paos_e_set(payload, 1);
2349}
2350
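/* Usage sketch (illustrative only): administratively bringing a port up.
 * MLXSW_PORT_ADMIN_STATUS_UP is assumed to be the "up" value of
 * enum mlxsw_port_admin_status from port.h; the register write itself is
 * not shown and the _example_ helper name is hypothetical.
 */
static inline void mlxsw_reg_paos_example_port_up(u8 local_port)
{
        char paos_pl[MLXSW_REG_PAOS_LEN];

        /* The pack helper sets ase, so admin_status takes effect. */
        mlxsw_reg_paos_pack(paos_pl, local_port, MLXSW_PORT_ADMIN_STATUS_UP);
}
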
Ido Schimmel6f253d82016-04-06 17:10:12 +02002351/* PFCC - Ports Flow Control Configuration Register
2352 * ------------------------------------------------
2353 * Configures and retrieves the per port flow control configuration.
2354 */
2355#define MLXSW_REG_PFCC_ID 0x5007
2356#define MLXSW_REG_PFCC_LEN 0x20
2357
2358static const struct mlxsw_reg_info mlxsw_reg_pfcc = {
2359 .id = MLXSW_REG_PFCC_ID,
2360 .len = MLXSW_REG_PFCC_LEN,
2361};
2362
2363/* reg_pfcc_local_port
2364 * Local port number.
2365 * Access: Index
2366 */
2367MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
2368
2369/* reg_pfcc_pnat
2370 * Port number access type. Determines the way local_port is interpreted:
2371 * 0 - Local port number.
2372 * 1 - IB / label port number.
2373 * Access: Index
2374 */
2375MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2);
2376
2377/* reg_pfcc_shl_cap
2378 * Send to higher layers capabilities:
2379 * 0 - No capability of sending Pause and PFC frames to higher layers.
2380 * 1 - Device has capability of sending Pause and PFC frames to higher
2381 * layers.
2382 * Access: RO
2383 */
2384MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1);
2385
2386/* reg_pfcc_shl_opr
2387 * Send to higher layers operation:
2388 * 0 - Pause and PFC frames are handled by the port (default).
2389 * 1 - Pause and PFC frames are handled by the port and also sent to
2390 * higher layers. Only valid if shl_cap = 1.
2391 * Access: RW
2392 */
2393MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1);
2394
2395/* reg_pfcc_ppan
2396 * Pause policy auto negotiation.
2397 * 0 - Disabled. Generate / ignore Pause frames based on pptx / pprtx.
2398 * 1 - Enabled. When auto-negotiation is performed, set the Pause policy
2399 * based on the auto-negotiation resolution.
2400 * Access: RW
2401 *
2402 * Note: The auto-negotiation advertisement is set according to pptx and
2403 * pprtx. When PFC is set on Tx / Rx, ppan must be set to 0.
2404 */
2405MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4);
2406
2407/* reg_pfcc_prio_mask_tx
2408 * Bit per priority indicating if Tx flow control policy should be
2409 * updated based on bit pfctx.
2410 * Access: WO
2411 */
2412MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8);
2413
2414/* reg_pfcc_prio_mask_rx
2415 * Bit per priority indicating if Rx flow control policy should be
2416 * updated based on bit pfcrx.
2417 * Access: WO
2418 */
2419MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8);
2420
2421/* reg_pfcc_pptx
2422 * Admin Pause policy on Tx.
2423 * 0 - Never generate Pause frames (default).
2424 * 1 - Generate Pause frames according to Rx buffer threshold.
2425 * Access: RW
2426 */
2427MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1);
2428
2429/* reg_pfcc_aptx
2430 * Active (operational) Pause policy on Tx.
2431 * 0 - Never generate Pause frames.
2432 * 1 - Generate Pause frames according to Rx buffer threshold.
2433 * Access: RO
2434 */
2435MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1);
2436
2437/* reg_pfcc_pfctx
2438 * Priority based flow control policy on Tx[7:0]. Per-priority bit mask:
2439 * 0 - Never generate priority Pause frames on the specified priority
2440 * (default).
2441 * 1 - Generate priority Pause frames according to Rx buffer threshold on
2442 * the specified priority.
2443 * Access: RW
2444 *
2445 * Note: pfctx and pptx must be mutually exclusive.
2446 */
2447MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8);
2448
2449/* reg_pfcc_pprx
2450 * Admin Pause policy on Rx.
2451 * 0 - Ignore received Pause frames (default).
2452 * 1 - Respect received Pause frames.
2453 * Access: RW
2454 */
2455MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1);
2456
2457/* reg_pfcc_aprx
2458 * Active (operational) Pause policy on Rx.
2459 * 0 - Ignore received Pause frames.
2460 * 1 - Respect received Pause frames.
2461 * Access: RO
2462 */
2463MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1);
2464
2465/* reg_pfcc_pfcrx
2466 * Priority based flow control policy on Rx[7:0]. Per-priority bit mask:
2467 * 0 - Ignore incoming priority Pause frames on the specified priority
2468 * (default).
2469 * 1 - Respect incoming priority Pause frames on the specified priority.
2470 * Access: RW
2471 */
2472MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8);
2473
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02002474#define MLXSW_REG_PFCC_ALL_PRIO 0xFF
2475
2476static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
2477{
2478 mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
2479 mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
2480 mlxsw_reg_pfcc_pfctx_set(payload, pfc_en);
2481 mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
2482}
2483
Ido Schimmel6f253d82016-04-06 17:10:12 +02002484static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
2485{
2486 MLXSW_REG_ZERO(pfcc, payload);
2487 mlxsw_reg_pfcc_local_port_set(payload, local_port);
2488}
2489
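/* Usage sketch (illustrative only): enabling PFC on priorities 3 and 4
 * while leaving global pause disabled (pptx/pprx stay 0 after the zeroing
 * pack, which also keeps pfctx and pptx mutually exclusive as required).
 * The register write (core.h) is assumed and not shown; the _example_
 * helper name is hypothetical.
 */
static inline void mlxsw_reg_pfcc_example_enable_pfc(u8 local_port)
{
        char pfcc_pl[MLXSW_REG_PFCC_LEN];
        u8 pfc_en = BIT(3) | BIT(4);

        mlxsw_reg_pfcc_pack(pfcc_pl, local_port);
        /* Updates the Tx/Rx PFC policy for all priorities at once. */
        mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc_en);
}
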
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002490/* PPCNT - Ports Performance Counters Register
2491 * -------------------------------------------
2492 * The PPCNT register retrieves per port performance counters.
2493 */
2494#define MLXSW_REG_PPCNT_ID 0x5008
2495#define MLXSW_REG_PPCNT_LEN 0x100
2496
2497static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
2498 .id = MLXSW_REG_PPCNT_ID,
2499 .len = MLXSW_REG_PPCNT_LEN,
2500};
2501
2502/* reg_ppcnt_swid
2503 * For HCA: must always be 0.
2504 * Switch partition ID to associate port with.
2505 * Switch partitions are numbered from 0 to 7 inclusively.
2506 * Switch partition 254 indicates stacking ports.
2507 * Switch partition 255 indicates all switch partitions.
2508 * Only valid on Set() operation with local_port=255.
2509 * Access: Index
2510 */
2511MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
2512
2513/* reg_ppcnt_local_port
2514 * Local port number.
2515 * 255 indicates all ports on the device, and is only allowed
2516 * for Set() operation.
2517 * Access: Index
2518 */
2519MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
2520
2521/* reg_ppcnt_pnat
2522 * Port number access type:
2523 * 0 - Local port number
2524 * 1 - IB port number
2525 * Access: Index
2526 */
2527MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
2528
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002529enum mlxsw_reg_ppcnt_grp {
2530 MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
2531 MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002532 MLXSW_REG_PPCNT_TC_CNT = 0x11,
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002533};
2534
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002535/* reg_ppcnt_grp
2536 * Performance counter group.
2537 * Group 63 indicates all groups. Only valid on Set() operation with
2538 * clr bit set.
2539 * 0x0: IEEE 802.3 Counters
2540 * 0x1: RFC 2863 Counters
2541 * 0x2: RFC 2819 Counters
2542 * 0x3: RFC 3635 Counters
2543 * 0x5: Ethernet Extended Counters
2544 * 0x8: Link Level Retransmission Counters
2545 * 0x10: Per Priority Counters
2546 * 0x11: Per Traffic Class Counters
2547 * 0x12: Physical Layer Counters
2548 * Access: Index
2549 */
2550MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
2551
2552/* reg_ppcnt_clr
2553 * Clear counters. Setting the clr bit will reset the counter value
2554 * for all counters in the counter group. This bit can be set
2555 * for both Set() and Get() operation.
2556 * Access: OP
2557 */
2558MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
2559
2560/* reg_ppcnt_prio_tc
2561 * Priority for counter set that support per priority, valid values: 0-7.
2562 * Traffic class for counter set that support per traffic class,
2563 * valid values: 0- cap_max_tclass-1 .
2564 * For HCA: cap_max_tclass is always 8.
2565 * Otherwise must be 0.
2566 * Access: Index
2567 */
2568MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
2569
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002570/* Ethernet IEEE 802.3 Counter Group */
2571
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002572/* reg_ppcnt_a_frames_transmitted_ok
2573 * Access: RO
2574 */
2575MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
2576 0x08 + 0x00, 0, 64);
2577
2578/* reg_ppcnt_a_frames_received_ok
2579 * Access: RO
2580 */
2581MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
2582 0x08 + 0x08, 0, 64);
2583
2584/* reg_ppcnt_a_frame_check_sequence_errors
2585 * Access: RO
2586 */
2587MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
2588 0x08 + 0x10, 0, 64);
2589
2590/* reg_ppcnt_a_alignment_errors
2591 * Access: RO
2592 */
2593MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
2594 0x08 + 0x18, 0, 64);
2595
2596/* reg_ppcnt_a_octets_transmitted_ok
2597 * Access: RO
2598 */
2599MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
2600 0x08 + 0x20, 0, 64);
2601
2602/* reg_ppcnt_a_octets_received_ok
2603 * Access: RO
2604 */
2605MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
2606 0x08 + 0x28, 0, 64);
2607
2608/* reg_ppcnt_a_multicast_frames_xmitted_ok
2609 * Access: RO
2610 */
2611MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
2612 0x08 + 0x30, 0, 64);
2613
2614/* reg_ppcnt_a_broadcast_frames_xmitted_ok
2615 * Access: RO
2616 */
2617MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
2618 0x08 + 0x38, 0, 64);
2619
2620/* reg_ppcnt_a_multicast_frames_received_ok
2621 * Access: RO
2622 */
2623MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
2624 0x08 + 0x40, 0, 64);
2625
2626/* reg_ppcnt_a_broadcast_frames_received_ok
2627 * Access: RO
2628 */
2629MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
2630 0x08 + 0x48, 0, 64);
2631
2632/* reg_ppcnt_a_in_range_length_errors
2633 * Access: RO
2634 */
2635MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
2636 0x08 + 0x50, 0, 64);
2637
2638/* reg_ppcnt_a_out_of_range_length_field
2639 * Access: RO
2640 */
2641MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
2642 0x08 + 0x58, 0, 64);
2643
2644/* reg_ppcnt_a_frame_too_long_errors
2645 * Access: RO
2646 */
2647MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
2648 0x08 + 0x60, 0, 64);
2649
2650/* reg_ppcnt_a_symbol_error_during_carrier
2651 * Access: RO
2652 */
2653MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
2654 0x08 + 0x68, 0, 64);
2655
2656/* reg_ppcnt_a_mac_control_frames_transmitted
2657 * Access: RO
2658 */
2659MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
2660 0x08 + 0x70, 0, 64);
2661
2662/* reg_ppcnt_a_mac_control_frames_received
2663 * Access: RO
2664 */
2665MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
2666 0x08 + 0x78, 0, 64);
2667
2668/* reg_ppcnt_a_unsupported_opcodes_received
2669 * Access: RO
2670 */
2671MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
2672 0x08 + 0x80, 0, 64);
2673
2674/* reg_ppcnt_a_pause_mac_ctrl_frames_received
2675 * Access: RO
2676 */
2677MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
2678 0x08 + 0x88, 0, 64);
2679
2680/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
2681 * Access: RO
2682 */
2683MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
2684 0x08 + 0x90, 0, 64);
2685
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002686/* Ethernet Per Priority Group Counters */
2687
2688/* reg_ppcnt_rx_octets
2689 * Access: RO
2690 */
2691MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64);
2692
2693/* reg_ppcnt_rx_frames
2694 * Access: RO
2695 */
2696MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64);
2697
2698/* reg_ppcnt_tx_octets
2699 * Access: RO
2700 */
2701MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64);
2702
2703/* reg_ppcnt_tx_frames
2704 * Access: RO
2705 */
2706MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64);
2707
2708/* reg_ppcnt_rx_pause
2709 * Access: RO
2710 */
2711MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64);
2712
2713/* reg_ppcnt_rx_pause_duration
2714 * Access: RO
2715 */
2716MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64);
2717
2718/* reg_ppcnt_tx_pause
2719 * Access: RO
2720 */
2721MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64);
2722
2723/* reg_ppcnt_tx_pause_duration
2724 * Access: RO
2725 */
2726MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
2727
2728/* reg_ppcnt_tx_pause_transition
2729 * Access: RO
2730 */
2731MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
2732
Ido Schimmeldf4750e2016-07-19 15:35:54 +02002733/* Ethernet Per Traffic Group Counters */
2734
2735/* reg_ppcnt_tc_transmit_queue
2736 * Contains the transmit queue depth in cells of traffic class
2737 * selected by prio_tc and the port selected by local_port.
2738 * The field cannot be cleared.
2739 * Access: RO
2740 */
2741MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64);
2742
2743/* reg_ppcnt_tc_no_buffer_discard_uc
2744 * The number of unicast packets dropped due to lack of shared
2745 * buffer resources.
2746 * Access: RO
2747 */
2748MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64);
2749
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002750static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
2751 enum mlxsw_reg_ppcnt_grp grp,
2752 u8 prio_tc)
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002753{
2754 MLXSW_REG_ZERO(ppcnt, payload);
2755 mlxsw_reg_ppcnt_swid_set(payload, 0);
2756 mlxsw_reg_ppcnt_local_port_set(payload, local_port);
2757 mlxsw_reg_ppcnt_pnat_set(payload, 0);
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002758 mlxsw_reg_ppcnt_grp_set(payload, grp);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002759 mlxsw_reg_ppcnt_clr_set(payload, 0);
Ido Schimmel34dba0a2016-04-06 17:10:15 +02002760 mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002761}
2762
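/* Usage sketch (illustrative only): reading a counter out of a PPCNT
 * payload. The payload is assumed to have been packed for the IEEE 802.3
 * group and filled by a query (e.g. mlxsw_reg_query() from core.h, not
 * shown). The _example_ helper name is hypothetical.
 */
static inline u64 mlxsw_reg_ppcnt_example_tx_frames(char *ppcnt_pl)
{
        /* Typical preparation, done before the query:
         * mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
         *                      MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
         */
        return mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
}
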
Ido Schimmelb98ff152016-04-06 17:10:00 +02002763/* PPTB - Port Prio To Buffer Register
2764 * -----------------------------------
2765 * Configures the switch priority to buffer table.
2766 */
2767#define MLXSW_REG_PPTB_ID 0x500B
Ido Schimmel11719a52016-07-15 11:15:02 +02002768#define MLXSW_REG_PPTB_LEN 0x10
Ido Schimmelb98ff152016-04-06 17:10:00 +02002769
2770static const struct mlxsw_reg_info mlxsw_reg_pptb = {
2771 .id = MLXSW_REG_PPTB_ID,
2772 .len = MLXSW_REG_PPTB_LEN,
2773};
2774
2775enum {
2776 MLXSW_REG_PPTB_MM_UM,
2777 MLXSW_REG_PPTB_MM_UNICAST,
2778 MLXSW_REG_PPTB_MM_MULTICAST,
2779};
2780
2781/* reg_pptb_mm
2782 * Mapping mode.
2783 * 0 - Map both unicast and multicast packets to the same buffer.
2784 * 1 - Map only unicast packets.
2785 * 2 - Map only multicast packets.
2786 * Access: Index
2787 *
2788 * Note: SwitchX-2 only supports the first option.
2789 */
2790MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
2791
2792/* reg_pptb_local_port
2793 * Local port number.
2794 * Access: Index
2795 */
2796MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
2797
2798/* reg_pptb_um
2799 * Enables the update of the untagged_buf field.
2800 * Access: RW
2801 */
2802MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1);
2803
2804/* reg_pptb_pm
2805 * Enables the update of the prio_to_buff field.
2806 * Bit <i> is a flag for updating the mapping for switch priority <i>.
2807 * Access: RW
2808 */
2809MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8);
2810
2811/* reg_pptb_prio_to_buff
2812 * Mapping of switch priority <i> to one of the allocated receive port
2813 * buffers.
2814 * Access: RW
2815 */
2816MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4);
2817
2818/* reg_pptb_pm_msb
2819 * Enables the update of the prio_to_buff field.
2820 * Bit <i> is a flag for updating the mapping for switch priority <i+8>.
2821 * Access: RW
2822 */
2823MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
2824
2825/* reg_pptb_untagged_buff
2826 * Mapping of untagged frames to one of the allocated receive port buffers.
2827 * Access: RW
2828 *
2829 * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for
2830 * Spectrum, as it maps untagged packets based on the default switch priority.
2831 */
2832MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
2833
Ido Schimmel11719a52016-07-15 11:15:02 +02002834/* reg_pptb_prio_to_buff_msb
2835 * Mapping of switch priority <i+8> to one of the allocated receive port
2836 * buffers.
2837 * Access: RW
2838 */
2839MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
2840
Ido Schimmelb98ff152016-04-06 17:10:00 +02002841#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
2842
2843static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
2844{
2845 MLXSW_REG_ZERO(pptb, payload);
2846 mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
2847 mlxsw_reg_pptb_local_port_set(payload, local_port);
2848 mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
Ido Schimmel11719a52016-07-15 11:15:02 +02002849 mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
2850}
2851
2852static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
2853 u8 buff)
2854{
2855 mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
2856 mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
Ido Schimmelb98ff152016-04-06 17:10:00 +02002857}
2858
Jiri Pirkoe0594362015-10-16 14:01:31 +02002859/* PBMC - Port Buffer Management Control Register
2860 * ----------------------------------------------
2861 * The PBMC register configures and retrieves the port packet buffer
2862 * allocation for different Prios, and the Pause threshold management.
2863 */
2864#define MLXSW_REG_PBMC_ID 0x500C
Ido Schimmel7ad7cd62016-04-06 17:10:04 +02002865#define MLXSW_REG_PBMC_LEN 0x6C
Jiri Pirkoe0594362015-10-16 14:01:31 +02002866
2867static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
2868 .id = MLXSW_REG_PBMC_ID,
2869 .len = MLXSW_REG_PBMC_LEN,
2870};
2871
2872/* reg_pbmc_local_port
2873 * Local port number.
2874 * Access: Index
2875 */
2876MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
2877
2878/* reg_pbmc_xoff_timer_value
2879 * When device generates a pause frame, it uses this value as the pause
2880 * timer (time for the peer port to pause in quota-512 bit time).
2881 * Access: RW
2882 */
2883MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
2884
2885/* reg_pbmc_xoff_refresh
2886 * The time before a new pause frame should be sent to refresh the pause
2887 * state. Using the same units as xoff_timer_value above (in quota-512 bit
2888 * time).
2889 * Access: RW
2890 */
2891MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
2892
Ido Schimmeld6b7c132016-04-06 17:10:05 +02002893#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11
2894
Jiri Pirkoe0594362015-10-16 14:01:31 +02002895/* reg_pbmc_buf_lossy
2896 * The field indicates if the buffer is lossy.
2897 * 0 - Lossless
2898 * 1 - Lossy
2899 * Access: RW
2900 */
2901MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false);
2902
2903/* reg_pbmc_buf_epsb
2904 * Eligible for Port Shared buffer.
2905 * If epsb is set, packets assigned to the buffer are allowed to be placed
2906 * in the port shared buffer.
2907 * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved.
2908 * Access: RW
2909 */
2910MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
2911
2912/* reg_pbmc_buf_size
2913 * The part of the packet buffer array allocated for the specific buffer.
2914 * Units are represented in cells.
2915 * Access: RW
2916 */
2917MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
2918
Ido Schimmel155f9de2016-04-06 17:10:13 +02002919/* reg_pbmc_buf_xoff_threshold
2920 * Once the amount of data in the buffer goes above this value, device
2921 * starts sending PFC frames for all priorities associated with the
2922 * buffer. Units are represented in cells. Reserved in case of lossy
2923 * buffer.
2924 * Access: RW
2925 *
2926 * Note: In Spectrum, reserved for buffer[9].
2927 */
2928MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
2929 0x08, 0x04, false);
2930
2931/* reg_pbmc_buf_xon_threshold
2932 * When the amount of data in the buffer goes below this value, device
2933 * stops sending PFC frames for the priorities associated with the
2934 * buffer. Units are represented in cells. Reserved in case of lossy
2935 * buffer.
2936 * Access: RW
2937 *
2938 * Note: In Spectrum, reserved for buffer[9].
2939 */
2940MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
2941 0x08, 0x04, false);
2942
Jiri Pirkoe0594362015-10-16 14:01:31 +02002943static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
2944 u16 xoff_timer_value, u16 xoff_refresh)
2945{
2946 MLXSW_REG_ZERO(pbmc, payload);
2947 mlxsw_reg_pbmc_local_port_set(payload, local_port);
2948 mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value);
2949 mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh);
2950}
2951
2952static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
2953 int buf_index,
2954 u16 size)
2955{
2956 mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1);
2957 mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
2958 mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
2959}
2960
Ido Schimmel155f9de2016-04-06 17:10:13 +02002961static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload,
2962 int buf_index, u16 size,
2963 u16 threshold)
2964{
2965 mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0);
2966 mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
2967 mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
2968 mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold);
2969 mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold);
2970}
2971
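/* Usage sketch (illustrative only): configuring one lossless buffer. In a
 * real driver PBMC is normally queried first and then modified; both the
 * query and the final write are assumed and not shown. The timer, size and
 * threshold values below are placeholders (in the units documented above),
 * not recommended settings, and the _example_ helper name is hypothetical.
 */
static inline void mlxsw_reg_pbmc_example_lossless(u8 local_port)
{
        char pbmc_pl[MLXSW_REG_PBMC_LEN];

        mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0xffff, 0xffff / 2);
        /* Buffer 0, 1000 cells, PFC threshold at 800 cells. */
        mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, 0, 1000, 800);
}
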
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002972/* PSPA - Port Switch Partition Allocation
2973 * ---------------------------------------
2974 * Controls the association of a port with a switch partition and enables
2975 * configuring ports as stacking ports.
2976 */
Jiri Pirko3f0effd2015-10-15 17:43:23 +02002977#define MLXSW_REG_PSPA_ID 0x500D
Ido Schimmel4ec14b72015-07-29 23:33:48 +02002978#define MLXSW_REG_PSPA_LEN 0x8
2979
2980static const struct mlxsw_reg_info mlxsw_reg_pspa = {
2981 .id = MLXSW_REG_PSPA_ID,
2982 .len = MLXSW_REG_PSPA_LEN,
2983};
2984
2985/* reg_pspa_swid
2986 * Switch partition ID.
2987 * Access: RW
2988 */
2989MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
2990
2991/* reg_pspa_local_port
2992 * Local port number.
2993 * Access: Index
2994 */
2995MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
2996
2997/* reg_pspa_sub_port
2998 * Virtual port within the local port. Set to 0 when virtual ports are
2999 * disabled on the local port.
3000 * Access: Index
3001 */
3002MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
3003
3004static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
3005{
3006 MLXSW_REG_ZERO(pspa, payload);
3007 mlxsw_reg_pspa_swid_set(payload, swid);
3008 mlxsw_reg_pspa_local_port_set(payload, local_port);
3009 mlxsw_reg_pspa_sub_port_set(payload, 0);
3010}
3011
3012/* HTGT - Host Trap Group Table
3013 * ----------------------------
3014 * Configures the properties for forwarding to CPU.
3015 */
3016#define MLXSW_REG_HTGT_ID 0x7002
3017#define MLXSW_REG_HTGT_LEN 0x100
3018
3019static const struct mlxsw_reg_info mlxsw_reg_htgt = {
3020 .id = MLXSW_REG_HTGT_ID,
3021 .len = MLXSW_REG_HTGT_LEN,
3022};
3023
3024/* reg_htgt_swid
3025 * Switch partition ID.
3026 * Access: Index
3027 */
3028MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
3029
3030#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0 /* For locally attached CPU */
3031
3032/* reg_htgt_type
3033 * CPU path type.
3034 * Access: RW
3035 */
3036MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
3037
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003038enum mlxsw_reg_htgt_trap_group {
3039 MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3040 MLXSW_REG_HTGT_TRAP_GROUP_RX,
3041 MLXSW_REG_HTGT_TRAP_GROUP_CTRL,
3042};
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003043
3044/* reg_htgt_trap_group
3045 * Trap group number. User defined number specifying which trap groups
3046 * should be forwarded to the CPU. The mapping between trap IDs and trap
3047 * groups is configured using HPKT register.
3048 * Access: Index
3049 */
3050MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
3051
3052enum {
3053 MLXSW_REG_HTGT_POLICER_DISABLE,
3054 MLXSW_REG_HTGT_POLICER_ENABLE,
3055};
3056
3057/* reg_htgt_pide
3058 * Enable policer ID specified using 'pid' field.
3059 * Access: RW
3060 */
3061MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
3062
3063/* reg_htgt_pid
3064 * Policer ID for the trap group.
3065 * Access: RW
3066 */
3067MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
3068
3069#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
3070
3071/* reg_htgt_mirror_action
3072 * Mirror action to use.
3073 * 0 - Trap to CPU.
3074 * 1 - Trap to CPU and mirror to a mirroring agent.
3075 * 2 - Mirror to a mirroring agent and do not trap to CPU.
3076 * Access: RW
3077 *
3078 * Note: Mirroring to a mirroring agent is only supported in Spectrum.
3079 */
3080MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
3081
3082/* reg_htgt_mirroring_agent
3083 * Mirroring agent.
3084 * Access: RW
3085 */
3086MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
3087
3088/* reg_htgt_priority
3089 * Trap group priority.
3090 * In case a packet matches multiple classification rules, the packet will
3091 * only be trapped once, based on the trap ID associated with the group (via
3092 * register HPKT) with the highest priority.
3093 * Supported values are 0-7, with 7 representing the highest priority.
3094 * Access: RW
3095 *
3096 * Note: In SwitchX-2 this field is ignored and the priority value is replaced
3097 * by the 'trap_group' field.
3098 */
3099MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
3100
3101/* reg_htgt_local_path_cpu_tclass
3102 * CPU ingress traffic class for the trap group.
3103 * Access: RW
3104 */
3105MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
3106
3107#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
3108#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003109#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003110
3111/* reg_htgt_local_path_rdq
3112 * Receive descriptor queue (RDQ) to use for the trap group.
3113 * Access: RW
3114 */
3115MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
3116
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003117static inline void mlxsw_reg_htgt_pack(char *payload,
3118 enum mlxsw_reg_htgt_trap_group group)
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003119{
3120 u8 swid, rdq;
3121
3122 MLXSW_REG_ZERO(htgt, payload);
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003123 switch (group) {
3124 case MLXSW_REG_HTGT_TRAP_GROUP_EMAD:
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003125 swid = MLXSW_PORT_SWID_ALL_SWIDS;
3126 rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003127 break;
3128 case MLXSW_REG_HTGT_TRAP_GROUP_RX:
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003129 swid = 0;
3130 rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003131 break;
3132 case MLXSW_REG_HTGT_TRAP_GROUP_CTRL:
3133 swid = 0;
3134 rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL;
3135 break;
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003136 }
3137 mlxsw_reg_htgt_swid_set(payload, swid);
3138 mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003139 mlxsw_reg_htgt_trap_group_set(payload, group);
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003140 mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
3141 mlxsw_reg_htgt_pid_set(payload, 0);
3142 mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
3143 mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
3144 mlxsw_reg_htgt_priority_set(payload, 0);
3145 mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
3146 mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
3147}
3148
3149/* HPKT - Host Packet Trap
3150 * -----------------------
3151 * Configures trap IDs inside trap groups.
3152 */
3153#define MLXSW_REG_HPKT_ID 0x7003
3154#define MLXSW_REG_HPKT_LEN 0x10
3155
3156static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
3157 .id = MLXSW_REG_HPKT_ID,
3158 .len = MLXSW_REG_HPKT_LEN,
3159};
3160
3161enum {
3162 MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
3163 MLXSW_REG_HPKT_ACK_REQUIRED,
3164};
3165
3166/* reg_hpkt_ack
3167 * Require acknowledgements from the host for events.
3168 * If set, then the device will wait for the event it sent to be acknowledged
3169 * by the host. This option is only relevant for event trap IDs.
3170 * Access: RW
3171 *
3172 * Note: Currently not supported by firmware.
3173 */
3174MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
3175
3176enum mlxsw_reg_hpkt_action {
3177 MLXSW_REG_HPKT_ACTION_FORWARD,
3178 MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
3179 MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
3180 MLXSW_REG_HPKT_ACTION_DISCARD,
3181 MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
3182 MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
3183};
3184
3185/* reg_hpkt_action
3186 * Action to perform on packet when trapped.
3187 * 0 - No action. Forward to CPU based on switching rules.
3188 * 1 - Trap to CPU (CPU receives sole copy).
3189 * 2 - Mirror to CPU (CPU receives a replica of the packet).
3190 * 3 - Discard.
3191 * 4 - Soft discard (allow other traps to act on the packet).
3192 * 5 - Trap and soft discard (allow other traps to overwrite this trap).
3193 * Access: RW
3194 *
3195 * Note: Must be set to 0 (forward) for event trap IDs, as they are already
3196 * addressed to the CPU.
3197 */
3198MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
3199
3200/* reg_hpkt_trap_group
3201 * Trap group to associate the trap with.
3202 * Access: RW
3203 */
3204MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
3205
3206/* reg_hpkt_trap_id
3207 * Trap ID.
3208 * Access: Index
3209 *
3210 * Note: A trap ID can only be associated with a single trap group. The device
3211 * will associate the trap ID with the last trap group configured.
3212 */
3213MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
3214
3215enum {
3216 MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
3217 MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
3218 MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
3219};
3220
3221/* reg_hpkt_ctrl
3222 * Configure dedicated buffer resources for control packets.
3223 * 0 - Keep factory defaults.
3224 * 1 - Do not use control buffer for this trap ID.
3225 * 2 - Use control buffer for this trap ID.
3226 * Access: RW
3227 */
3228MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
3229
Ido Schimmelf24af332015-10-15 17:43:27 +02003230static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003231{
Ido Schimmel801bd3d2015-10-15 17:43:28 +02003232 enum mlxsw_reg_htgt_trap_group trap_group;
Ido Schimmelf24af332015-10-15 17:43:27 +02003233
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003234 MLXSW_REG_ZERO(hpkt, payload);
3235 mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
3236 mlxsw_reg_hpkt_action_set(payload, action);
Ido Schimmelf24af332015-10-15 17:43:27 +02003237 switch (trap_id) {
3238 case MLXSW_TRAP_ID_ETHEMAD:
3239 case MLXSW_TRAP_ID_PUDE:
3240 trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD;
3241 break;
3242 default:
3243 trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX;
3244 break;
3245 }
Ido Schimmel4ec14b72015-07-29 23:33:48 +02003246 mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
3247 mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
3248 mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
3249}
3250
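/* Usage sketch (illustrative only): registering for PUDE (port up/down)
 * events. PUDE is an event trap ID, so per the note above the action must
 * be "forward". Writing hpkt_pl to the device (core.h) is assumed and not
 * shown; the _example_ helper name is hypothetical.
 */
static inline void mlxsw_reg_hpkt_example_pude_event(void)
{
        char hpkt_pl[MLXSW_REG_HPKT_LEN];

        /* The pack helper above selects the trap group from the trap ID. */
        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
                            MLXSW_TRAP_ID_PUDE);
}
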
Ido Schimmel69c407a2016-07-02 11:00:13 +02003251/* RGCR - Router General Configuration Register
3252 * --------------------------------------------
3253 * The register is used for setting up the router configuration.
3254 */
3255#define MLXSW_REG_RGCR_ID 0x8001
3256#define MLXSW_REG_RGCR_LEN 0x28
3257
3258static const struct mlxsw_reg_info mlxsw_reg_rgcr = {
3259 .id = MLXSW_REG_RGCR_ID,
3260 .len = MLXSW_REG_RGCR_LEN,
3261};
3262
3263/* reg_rgcr_ipv4_en
3264 * IPv4 router enable.
3265 * Access: RW
3266 */
3267MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1);
3268
3269/* reg_rgcr_ipv6_en
3270 * IPv6 router enable.
3271 * Access: RW
3272 */
3273MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1);
3274
3275/* reg_rgcr_max_router_interfaces
3276 * Defines the maximum number of active router interfaces for all virtual
3277 * routers.
3278 * Access: RW
3279 */
3280MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16);
3281
3282/* reg_rgcr_usp
3283 * Update switch priority and packet color.
3284 * 0 - Preserve the value of Switch Priority and packet color.
3285 * 1 - Recalculate the value of Switch Priority and packet color.
3286 * Access: RW
3287 *
3288 * Note: Not supported by SwitchX and SwitchX-2.
3289 */
3290MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1);
3291
3292/* reg_rgcr_pcp_rw
3293 * Indicates how to handle the pcp_rewrite_en value:
3294 * 0 - Preserve the value of pcp_rewrite_en.
3295 * 2 - Disable PCP rewrite.
3296 * 3 - Enable PCP rewrite.
3297 * Access: RW
3298 *
3299 * Note: Not supported by SwitchX and SwitchX-2.
3300 */
3301MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2);
3302
3303/* reg_rgcr_activity_dis
3304 * Activity disable:
3305 * 0 - Activity will be set when an entry is hit (default).
3306 * 1 - Activity will not be set when an entry is hit.
3307 *
3308 * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry
3309 * (RALUE).
3310 * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host
3311 * Entry (RAUHT).
3312 * Bits 2:7 are reserved.
3313 * Access: RW
3314 *
3315 * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB.
3316 */
3317MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8);
3318
3319static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en)
3320{
3321 MLXSW_REG_ZERO(rgcr, payload);
3322 mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en);
3323}
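
/* Illustrative sketch (hypothetical helper, not used by the driver code
 * itself): enabling the IPv4 router and capping the number of RIFs is done
 * by packing RGCR and then issuing it through the core's register write
 * helper (defined in core.h, not in this file).
 */
static inline void mlxsw_reg_rgcr_example_prepare(char *rgcr_pl, u16 max_rifs)
{
	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
}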
3324
/* RITR - Router Interface Table Register
3326 * --------------------------------------
3327 * The register is used to configure the router interface table.
3328 */
3329#define MLXSW_REG_RITR_ID 0x8002
3330#define MLXSW_REG_RITR_LEN 0x40
3331
3332static const struct mlxsw_reg_info mlxsw_reg_ritr = {
3333 .id = MLXSW_REG_RITR_ID,
3334 .len = MLXSW_REG_RITR_LEN,
3335};
3336
3337/* reg_ritr_enable
3338 * Enables routing on the router interface.
3339 * Access: RW
3340 */
3341MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1);
3342
3343/* reg_ritr_ipv4
3344 * IPv4 routing enable. Enables routing of IPv4 traffic on the router
3345 * interface.
3346 * Access: RW
3347 */
3348MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1);
3349
3350/* reg_ritr_ipv6
3351 * IPv6 routing enable. Enables routing of IPv6 traffic on the router
3352 * interface.
3353 * Access: RW
3354 */
3355MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
3356
3357enum mlxsw_reg_ritr_if_type {
3358 MLXSW_REG_RITR_VLAN_IF,
3359 MLXSW_REG_RITR_FID_IF,
3360 MLXSW_REG_RITR_SP_IF,
3361};
3362
3363/* reg_ritr_type
3364 * Router interface type.
3365 * 0 - VLAN interface.
3366 * 1 - FID interface.
3367 * 2 - Sub-port interface.
3368 * Access: RW
3369 */
3370MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3);
3371
3372enum {
3373 MLXSW_REG_RITR_RIF_CREATE,
3374 MLXSW_REG_RITR_RIF_DEL,
3375};
3376
3377/* reg_ritr_op
3378 * Opcode:
3379 * 0 - Create or edit RIF.
3380 * 1 - Delete RIF.
3381 * Reserved for SwitchX-2. For Spectrum, editing of interface properties
3382 * is not supported. An interface must be deleted and re-created in order
3383 * to update properties.
3384 * Access: WO
3385 */
3386MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2);
3387
3388/* reg_ritr_rif
3389 * Router interface index. A pointer to the Router Interface Table.
3390 * Access: Index
3391 */
3392MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16);
3393
3394/* reg_ritr_ipv4_fe
3395 * IPv4 Forwarding Enable.
3396 * Enables routing of IPv4 traffic on the router interface. When disabled,
3397 * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
3398 * Not supported in SwitchX-2.
3399 * Access: RW
3400 */
3401MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
3402
3403/* reg_ritr_ipv6_fe
3404 * IPv6 Forwarding Enable.
3405 * Enables routing of IPv6 traffic on the router interface. When disabled,
3406 * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
3407 * Not supported in SwitchX-2.
3408 * Access: RW
3409 */
3410MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
3411
/* reg_ritr_lb_en
3413 * Loop-back filter enable for unicast packets.
3414 * If the flag is set then loop-back filter for unicast packets is
3415 * implemented on the RIF. Multicast packets are always subject to
3416 * loop-back filtering.
3417 * Access: RW
3418 */
3419MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
3420
/* reg_ritr_virtual_router
3422 * Virtual router ID associated with the router interface.
3423 * Access: RW
3424 */
3425MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16);
3426
3427/* reg_ritr_mtu
3428 * Router interface MTU.
3429 * Access: RW
3430 */
3431MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16);
3432
3433/* reg_ritr_if_swid
3434 * Switch partition ID.
3435 * Access: RW
3436 */
3437MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
3438
3439/* reg_ritr_if_mac
3440 * Router interface MAC address.
3441 * In Spectrum, all MAC addresses must have the same 38 MSBits.
3442 * Access: RW
3443 */
3444MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
3445
3446/* VLAN Interface */
3447
3448/* reg_ritr_vlan_if_vid
3449 * VLAN ID.
3450 * Access: RW
3451 */
3452MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
3453
3454/* FID Interface */
3455
3456/* reg_ritr_fid_if_fid
3457 * Filtering ID. Used to connect a bridge to the router. Only FIDs from
3458 * the vFID range are supported.
3459 * Access: RW
3460 */
3461MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
3462
3463static inline void mlxsw_reg_ritr_fid_set(char *payload,
3464 enum mlxsw_reg_ritr_if_type rif_type,
3465 u16 fid)
3466{
3467 if (rif_type == MLXSW_REG_RITR_FID_IF)
3468 mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
3469 else
3470 mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
3471}
3472
3473/* Sub-port Interface */
3474
3475/* reg_ritr_sp_if_lag
3476 * LAG indication. When this bit is set the system_port field holds the
3477 * LAG identifier.
3478 * Access: RW
3479 */
3480MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
3481
3482/* reg_ritr_sp_system_port
 * Port unique identifier. When the lag bit is set, this field holds the
3484 * lag_id in bits 0:9.
3485 * Access: RW
3486 */
3487MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
3488
3489/* reg_ritr_sp_if_vid
3490 * VLAN ID.
3491 * Access: RW
3492 */
3493MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
3494
3495static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
3496{
3497 MLXSW_REG_ZERO(ritr, payload);
3498 mlxsw_reg_ritr_rif_set(payload, rif);
3499}
3500
3501static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
3502 u16 system_port, u16 vid)
3503{
3504 mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
3505 mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
3506 mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
3507}
3508
3509static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
3510 enum mlxsw_reg_ritr_if_type type,
3511 u16 rif, u16 mtu, const char *mac)
3512{
3513 bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
3514
3515 MLXSW_REG_ZERO(ritr, payload);
3516 mlxsw_reg_ritr_enable_set(payload, enable);
3517 mlxsw_reg_ritr_ipv4_set(payload, 1);
3518 mlxsw_reg_ritr_type_set(payload, type);
3519 mlxsw_reg_ritr_op_set(payload, op);
3520 mlxsw_reg_ritr_rif_set(payload, rif);
3521 mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
	mlxsw_reg_ritr_lb_en_set(payload, 1);
	mlxsw_reg_ritr_mtu_set(payload, mtu);
3524 mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
3525}
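
/* Illustrative sketch (hypothetical helper): creating a VLAN router
 * interface combines the generic pack with the VLAN-interface and
 * virtual-router specific fields. The packed payload is then written with
 * the core's register write helper.
 */
static inline void
mlxsw_reg_ritr_example_vlan_rif(char *ritr_pl, u16 rif, u16 vid, u16 vr_id,
				u16 mtu, const char *mac)
{
	mlxsw_reg_ritr_pack(ritr_pl, true, MLXSW_REG_RITR_VLAN_IF, rif, mtu,
			    mac);
	mlxsw_reg_ritr_fid_set(ritr_pl, MLXSW_REG_RITR_VLAN_IF, vid);
	mlxsw_reg_ritr_virtual_router_set(ritr_pl, vr_id);
}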
3526
/* RATR - Router Adjacency Table Register
3528 * --------------------------------------
3529 * The RATR register is used to configure the Router Adjacency (next-hop)
3530 * Table.
3531 */
3532#define MLXSW_REG_RATR_ID 0x8008
3533#define MLXSW_REG_RATR_LEN 0x2C
3534
3535static const struct mlxsw_reg_info mlxsw_reg_ratr = {
3536 .id = MLXSW_REG_RATR_ID,
3537 .len = MLXSW_REG_RATR_LEN,
3538};
3539
3540enum mlxsw_reg_ratr_op {
3541 /* Read */
3542 MLXSW_REG_RATR_OP_QUERY_READ = 0,
3543 /* Read and clear activity */
3544 MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2,
3545 /* Write Adjacency entry */
3546 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1,
3547 /* Write Adjacency entry only if the activity is cleared.
	 * The write may not succeed if the activity is set. There is no
	 * direct feedback on whether the write has succeeded; however,
3550 * the get will reveal the actual entry (SW can compare the get
3551 * response to the set command).
3552 */
3553 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3,
3554};
3555
3556/* reg_ratr_op
3557 * Note that Write operation may also be used for updating
3558 * counter_set_type and counter_index. In this case all other
3559 * fields must not be updated.
3560 * Access: OP
3561 */
3562MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4);
3563
3564/* reg_ratr_v
3565 * Valid bit. Indicates if the adjacency entry is valid.
3566 * Note: the device may need some time before reusing an invalidated
 * entry. During this time the entry cannot be reused. It is
 * recommended to use another entry before reusing an invalidated
 * entry (e.g. software can put it at the end of the list for
 * reuse). Trying to access an invalidated entry not yet cleared
 * by the device results in a failure with a "Try Again" status.
 * When valid is '0', then egress_router_interface, trap_action,
 * adjacency_parameters and counters are reserved.
3574 * Access: RW
3575 */
3576MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1);
3577
3578/* reg_ratr_a
3579 * Activity. Set for new entries. Set if a packet lookup has hit on
3580 * the specific entry. To clear the a bit, use "clear activity".
3581 * Access: RO
3582 */
3583MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1);
3584
3585/* reg_ratr_adjacency_index_low
3586 * Bits 15:0 of index into the adjacency table.
3587 * For SwitchX and SwitchX-2, the adjacency table is linear and
3588 * used for adjacency entries only.
3589 * For Spectrum, the index is to the KVD linear.
3590 * Access: Index
3591 */
3592MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16);
3593
3594/* reg_ratr_egress_router_interface
3595 * Range is 0 .. cap_max_router_interfaces - 1
3596 * Access: RW
3597 */
3598MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16);
3599
3600enum mlxsw_reg_ratr_trap_action {
3601 MLXSW_REG_RATR_TRAP_ACTION_NOP,
3602 MLXSW_REG_RATR_TRAP_ACTION_TRAP,
3603 MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU,
3604 MLXSW_REG_RATR_TRAP_ACTION_MIRROR,
3605 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS,
3606};
3607
3608/* reg_ratr_trap_action
3609 * see mlxsw_reg_ratr_trap_action
3610 * Access: RW
3611 */
3612MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4);
3613
3614enum mlxsw_reg_ratr_trap_id {
3615 MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0,
3616 MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1,
3617};
3618
3619/* reg_ratr_adjacency_index_high
3620 * Bits 23:16 of the adjacency_index.
3621 * Access: Index
3622 */
3623MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8);
3624
3625/* reg_ratr_trap_id
3626 * Trap ID to be reported to CPU.
3627 * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
3628 * For trap_action of NOP, MIRROR and DISCARD_ERROR
3629 * Access: RW
3630 */
3631MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8);
3632
3633/* reg_ratr_eth_destination_mac
3634 * MAC address of the destination next-hop.
3635 * Access: RW
3636 */
3637MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6);
3638
3639static inline void
3640mlxsw_reg_ratr_pack(char *payload,
3641 enum mlxsw_reg_ratr_op op, bool valid,
3642 u32 adjacency_index, u16 egress_rif)
3643{
3644 MLXSW_REG_ZERO(ratr, payload);
3645 mlxsw_reg_ratr_op_set(payload, op);
3646 mlxsw_reg_ratr_v_set(payload, valid);
3647 mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index);
3648 mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16);
3649 mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif);
3650}
3651
3652static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
3653 const char *dest_mac)
3654{
3655 mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
3656}
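
/* Illustrative sketch (hypothetical helper): programming an Ethernet
 * next hop at a given adjacency index. The packed payload is then written
 * with the core's register write helper.
 */
static inline void
mlxsw_reg_ratr_example_write_nexthop(char *ratr_pl, u32 adj_index,
				     u16 egress_rif, const char *neigh_mac)
{
	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, egress_rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_mac);
}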
3657
/* RALTA - Router Algorithmic LPM Tree Allocation Register
3659 * -------------------------------------------------------
3660 * RALTA is used to allocate the LPM trees of the SHSPM method.
3661 */
3662#define MLXSW_REG_RALTA_ID 0x8010
3663#define MLXSW_REG_RALTA_LEN 0x04
3664
3665static const struct mlxsw_reg_info mlxsw_reg_ralta = {
3666 .id = MLXSW_REG_RALTA_ID,
3667 .len = MLXSW_REG_RALTA_LEN,
3668};
3669
3670/* reg_ralta_op
3671 * opcode (valid for Write, must be 0 on Read)
3672 * 0 - allocate a tree
3673 * 1 - deallocate a tree
3674 * Access: OP
3675 */
3676MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2);
3677
3678enum mlxsw_reg_ralxx_protocol {
3679 MLXSW_REG_RALXX_PROTOCOL_IPV4,
3680 MLXSW_REG_RALXX_PROTOCOL_IPV6,
3681};
3682
3683/* reg_ralta_protocol
3684 * Protocol.
3685 * Deallocation opcode: Reserved.
3686 * Access: RW
3687 */
3688MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4);
3689
3690/* reg_ralta_tree_id
3691 * An identifier (numbered from 1..cap_shspm_max_trees-1) representing
3692 * the tree identifier (managed by software).
3693 * Note that tree_id 0 is allocated for a default-route tree.
3694 * Access: Index
3695 */
3696MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8);
3697
3698static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
3699 enum mlxsw_reg_ralxx_protocol protocol,
3700 u8 tree_id)
3701{
3702 MLXSW_REG_ZERO(ralta, payload);
3703 mlxsw_reg_ralta_op_set(payload, !alloc);
3704 mlxsw_reg_ralta_protocol_set(payload, protocol);
3705 mlxsw_reg_ralta_tree_id_set(payload, tree_id);
3706}
3707
/* RALST - Router Algorithmic LPM Structure Tree Register
3709 * ------------------------------------------------------
3710 * RALST is used to set and query the structure of an LPM tree.
3711 * The structure of the tree must be sorted as a sorted binary tree, while
3712 * each node is a bin that is tagged as the length of the prefixes the lookup
3713 * will refer to. Therefore, bin X refers to a set of entries with prefixes
3714 * of X bits to match with the destination address. The bin 0 indicates
3715 * the default action, when there is no match of any prefix.
3716 */
3717#define MLXSW_REG_RALST_ID 0x8011
3718#define MLXSW_REG_RALST_LEN 0x104
3719
3720static const struct mlxsw_reg_info mlxsw_reg_ralst = {
3721 .id = MLXSW_REG_RALST_ID,
3722 .len = MLXSW_REG_RALST_LEN,
3723};
3724
3725/* reg_ralst_root_bin
3726 * The bin number of the root bin.
 * 0 < root_bin <= (length of the IP address)
 * For a default-route tree, configure 0xff.
3729 * Access: RW
3730 */
3731MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8);
3732
3733/* reg_ralst_tree_id
3734 * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
3735 * Access: Index
3736 */
3737MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8);
3738
3739#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff
3740#define MLXSW_REG_RALST_BIN_OFFSET 0x04
3741#define MLXSW_REG_RALST_BIN_COUNT 128
3742
3743/* reg_ralst_left_child_bin
3744 * Holding the children of the bin according to the stored tree's structure.
3745 * For trees composed of less than 4 blocks, the bins in excess are reserved.
 * Note that tree_id 0 is allocated for a default-route tree whose bins are 0xff.
3747 * Access: RW
3748 */
3749MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false);
3750
3751/* reg_ralst_right_child_bin
3752 * Holding the children of the bin according to the stored tree's structure.
3753 * For trees composed of less than 4 blocks, the bins in excess are reserved.
 * Note that tree_id 0 is allocated for a default-route tree whose bins are 0xff.
3755 * Access: RW
3756 */
3757MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00,
3758 false);
3759
3760static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id)
3761{
3762 MLXSW_REG_ZERO(ralst, payload);
3763
3764 /* Initialize all bins to have no left or right child */
3765 memset(payload + MLXSW_REG_RALST_BIN_OFFSET,
3766 MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2);
3767
3768 mlxsw_reg_ralst_root_bin_set(payload, root_bin);
3769 mlxsw_reg_ralst_tree_id_set(payload, tree_id);
3770}
3771
3772static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
3773 u8 left_child_bin,
3774 u8 right_child_bin)
3775{
3776 int bin_index = bin_number - 1;
3777
3778 mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin);
3779 mlxsw_reg_ralst_right_child_bin_set(payload, bin_index,
3780 right_child_bin);
3781}
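
/* Illustrative sketch (one possible layout, hypothetical helper): a
 * degenerate tree that tries /32 prefixes first and falls back to /24,
 * with bin 0 implicitly acting as the default. The shorter-prefix bin is
 * chained as the left child here; this is only an example structure.
 */
static inline void mlxsw_reg_ralst_example_chain(char *ralst_pl, u8 tree_id)
{
	mlxsw_reg_ralst_pack(ralst_pl, 32, tree_id);
	mlxsw_reg_ralst_bin_pack(ralst_pl, 32, 24, MLXSW_REG_RALST_BIN_NO_CHILD);
	mlxsw_reg_ralst_bin_pack(ralst_pl, 24, MLXSW_REG_RALST_BIN_NO_CHILD,
				 MLXSW_REG_RALST_BIN_NO_CHILD);
}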
3782
/* RALTB - Router Algorithmic LPM Tree Binding Register
3784 * ----------------------------------------------------
3785 * RALTB is used to bind virtual router and protocol to an allocated LPM tree.
3786 */
3787#define MLXSW_REG_RALTB_ID 0x8012
3788#define MLXSW_REG_RALTB_LEN 0x04
3789
3790static const struct mlxsw_reg_info mlxsw_reg_raltb = {
3791 .id = MLXSW_REG_RALTB_ID,
3792 .len = MLXSW_REG_RALTB_LEN,
3793};
3794
3795/* reg_raltb_virtual_router
3796 * Virtual Router ID
3797 * Range is 0..cap_max_virtual_routers-1
3798 * Access: Index
3799 */
3800MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16);
3801
3802/* reg_raltb_protocol
3803 * Protocol.
3804 * Access: Index
3805 */
3806MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4);
3807
3808/* reg_raltb_tree_id
3809 * Tree to be used for the {virtual_router, protocol}
3810 * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
3811 * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0.
3812 * Access: RW
3813 */
3814MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8);
3815
3816static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
3817 enum mlxsw_reg_ralxx_protocol protocol,
3818 u8 tree_id)
3819{
3820 MLXSW_REG_ZERO(raltb, payload);
3821 mlxsw_reg_raltb_virtual_router_set(payload, virtual_router);
3822 mlxsw_reg_raltb_protocol_set(payload, protocol);
3823 mlxsw_reg_raltb_tree_id_set(payload, tree_id);
3824}
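
/* Illustrative sketch (hypothetical helper): a typical flow allocates a
 * tree with RALTA, describes its structure with RALST (see the sketch
 * above) and only then binds a {virtual router, protocol} pair to it with
 * RALTB. Each payload is written to the device separately.
 */
static inline void
mlxsw_reg_ralxx_example_alloc_and_bind(char *ralta_pl, char *raltb_pl,
				       u16 vr_id, u8 tree_id)
{
	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     tree_id);
	mlxsw_reg_raltb_pack(raltb_pl, vr_id, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     tree_id);
}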
3825
/* RALUE - Router Algorithmic LPM Unicast Entry Register
3827 * -----------------------------------------------------
3828 * RALUE is used to configure and query LPM entries that serve
3829 * the Unicast protocols.
3830 */
3831#define MLXSW_REG_RALUE_ID 0x8013
3832#define MLXSW_REG_RALUE_LEN 0x38
3833
3834static const struct mlxsw_reg_info mlxsw_reg_ralue = {
3835 .id = MLXSW_REG_RALUE_ID,
3836 .len = MLXSW_REG_RALUE_LEN,
3837};
3838
3839/* reg_ralue_protocol
3840 * Protocol.
3841 * Access: Index
3842 */
3843MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4);
3844
3845enum mlxsw_reg_ralue_op {
3846 /* Read operation. If entry doesn't exist, the operation fails. */
3847 MLXSW_REG_RALUE_OP_QUERY_READ = 0,
3848 /* Clear on read operation. Used to read entry and
3849 * clear Activity bit.
3850 */
3851 MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1,
3852 /* Write operation. Used to write a new entry to the table. All RW
3853 * fields are written for new entry. Activity bit is set
3854 * for new entries.
3855 */
3856 MLXSW_REG_RALUE_OP_WRITE_WRITE = 0,
3857 /* Update operation. Used to update an existing route entry and
3858 * only update the RW fields that are detailed in the field
3859 * op_u_mask. If entry doesn't exist, the operation fails.
3860 */
3861 MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1,
3862 /* Clear activity. The Activity bit (the field a) is cleared
3863 * for the entry.
3864 */
3865 MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2,
3866 /* Delete operation. Used to delete an existing entry. If entry
3867 * doesn't exist, the operation fails.
3868 */
3869 MLXSW_REG_RALUE_OP_WRITE_DELETE = 3,
3870};
3871
3872/* reg_ralue_op
3873 * Operation.
3874 * Access: OP
3875 */
3876MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3);
3877
3878/* reg_ralue_a
3879 * Activity. Set for new entries. Set if a packet lookup has hit on the
3880 * specific entry, only if the entry is a route. To clear the a bit, use
3881 * "clear activity" op.
3882 * Enabled by activity_dis in RGCR
3883 * Access: RO
3884 */
3885MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1);
3886
3887/* reg_ralue_virtual_router
3888 * Virtual Router ID
3889 * Range is 0..cap_max_virtual_routers-1
3890 * Access: Index
3891 */
3892MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16);
3893
3894#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0)
3895#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1)
3896#define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2)
3897
3898/* reg_ralue_op_u_mask
3899 * opcode update mask.
3900 * On read operation, this field is reserved.
3901 * This field is valid for update opcode, otherwise - reserved.
3902 * This field is a bitmask of the fields that should be updated.
3903 * Access: WO
3904 */
3905MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3);
3906
3907/* reg_ralue_prefix_len
3908 * Number of bits in the prefix of the LPM route.
3909 * Note that for IPv6 prefixes, if prefix_len>64 the entry consumes
3910 * two entries in the physical HW table.
3911 * Access: Index
3912 */
3913MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8);
3914
3915/* reg_ralue_dip*
3916 * The prefix of the route or of the marker that the object of the LPM
3917 * is compared with. The most significant bits of the dip are the prefix.
 * The least significant bits must be '0' if the prefix_len is smaller
3919 * than 128 for IPv6 or smaller than 32 for IPv4.
3920 * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved.
3921 * Access: Index
3922 */
3923MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32);
3924
3925enum mlxsw_reg_ralue_entry_type {
3926 MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1,
3927 MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2,
3928 MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3,
3929};
3930
3931/* reg_ralue_entry_type
3932 * Entry type.
3933 * Note - for Marker entries, the action_type and action fields are reserved.
3934 * Access: RW
3935 */
3936MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2);
3937
3938/* reg_ralue_bmp_len
3939 * The best match prefix length in the case that there is no match for
3940 * longer prefixes.
3941 * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
3942 * Note for any update operation with entry_type modification this
3943 * field must be set.
3944 * Access: RW
3945 */
3946MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8);
3947
3948enum mlxsw_reg_ralue_action_type {
3949 MLXSW_REG_RALUE_ACTION_TYPE_REMOTE,
3950 MLXSW_REG_RALUE_ACTION_TYPE_LOCAL,
3951 MLXSW_REG_RALUE_ACTION_TYPE_IP2ME,
3952};
3953
3954/* reg_ralue_action_type
3955 * Action Type
3956 * Indicates how the IP address is connected.
3957 * It can be connected to a local subnet through local_erif or can be
3958 * on a remote subnet connected through a next-hop router,
3959 * or transmitted to the CPU.
3960 * Reserved when entry_type = MARKER_ENTRY
3961 * Access: RW
3962 */
3963MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2);
3964
3965enum mlxsw_reg_ralue_trap_action {
3966 MLXSW_REG_RALUE_TRAP_ACTION_NOP,
3967 MLXSW_REG_RALUE_TRAP_ACTION_TRAP,
3968 MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU,
3969 MLXSW_REG_RALUE_TRAP_ACTION_MIRROR,
3970 MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR,
3971};
3972
3973/* reg_ralue_trap_action
3974 * Trap action.
3975 * For IP2ME action, only NOP and MIRROR are possible.
3976 * Access: RW
3977 */
3978MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4);
3979
3980/* reg_ralue_trap_id
3981 * Trap ID to be reported to CPU.
3982 * Trap ID is RTR_INGRESS0 or RTR_INGRESS1.
3983 * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
3984 * Access: RW
3985 */
3986MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9);
3987
3988/* reg_ralue_adjacency_index
3989 * Points to the first entry of the group-based ECMP.
3990 * Only relevant in case of REMOTE action.
3991 * Access: RW
3992 */
3993MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24);
3994
3995/* reg_ralue_ecmp_size
 * Number of sequential entries starting
3997 * from the adjacency_index (the number of ECMPs).
3998 * The valid range is 1-64, 512, 1024, 2048 and 4096.
3999 * Reserved when trap_action is TRAP or DISCARD_ERROR.
4000 * Only relevant in case of REMOTE action.
4001 * Access: RW
4002 */
4003MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13);
4004
4005/* reg_ralue_local_erif
4006 * Egress Router Interface.
4007 * Only relevant in case of LOCAL action.
4008 * Access: RW
4009 */
4010MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16);
4011
4012/* reg_ralue_v
4013 * Valid bit for the tunnel_ptr field.
4014 * If valid = 0 then trap to CPU as IP2ME trap ID.
4015 * If valid = 1 and the packet format allows NVE or IPinIP tunnel
4016 * decapsulation then tunnel decapsulation is done.
4017 * If valid = 1 and packet format does not allow NVE or IPinIP tunnel
4018 * decapsulation then trap as IP2ME trap ID.
4019 * Only relevant in case of IP2ME action.
4020 * Access: RW
4021 */
4022MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1);
4023
4024/* reg_ralue_tunnel_ptr
4025 * Tunnel Pointer for NVE or IPinIP tunnel decapsulation.
4026 * For Spectrum, pointer to KVD Linear.
4027 * Only relevant in case of IP2ME action.
4028 * Access: RW
4029 */
4030MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24);
4031
4032static inline void mlxsw_reg_ralue_pack(char *payload,
4033 enum mlxsw_reg_ralxx_protocol protocol,
4034 enum mlxsw_reg_ralue_op op,
4035 u16 virtual_router, u8 prefix_len)
4036{
4037 MLXSW_REG_ZERO(ralue, payload);
4038 mlxsw_reg_ralue_protocol_set(payload, protocol);
	mlxsw_reg_ralue_op_set(payload, op);
	mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
4041 mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
4042 mlxsw_reg_ralue_entry_type_set(payload,
4043 MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
4044 mlxsw_reg_ralue_bmp_len_set(payload, prefix_len);
4045}
4046
4047static inline void mlxsw_reg_ralue_pack4(char *payload,
4048 enum mlxsw_reg_ralxx_protocol protocol,
4049 enum mlxsw_reg_ralue_op op,
4050 u16 virtual_router, u8 prefix_len,
4051 u32 dip)
4052{
4053 mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
4054 mlxsw_reg_ralue_dip4_set(payload, dip);
4055}
4056
4057static inline void
4058mlxsw_reg_ralue_act_remote_pack(char *payload,
4059 enum mlxsw_reg_ralue_trap_action trap_action,
4060 u16 trap_id, u32 adjacency_index, u16 ecmp_size)
4061{
4062 mlxsw_reg_ralue_action_type_set(payload,
4063 MLXSW_REG_RALUE_ACTION_TYPE_REMOTE);
4064 mlxsw_reg_ralue_trap_action_set(payload, trap_action);
4065 mlxsw_reg_ralue_trap_id_set(payload, trap_id);
4066 mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index);
4067 mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size);
4068}
4069
4070static inline void
4071mlxsw_reg_ralue_act_local_pack(char *payload,
4072 enum mlxsw_reg_ralue_trap_action trap_action,
4073 u16 trap_id, u16 local_erif)
4074{
4075 mlxsw_reg_ralue_action_type_set(payload,
4076 MLXSW_REG_RALUE_ACTION_TYPE_LOCAL);
4077 mlxsw_reg_ralue_trap_action_set(payload, trap_action);
4078 mlxsw_reg_ralue_trap_id_set(payload, trap_id);
4079 mlxsw_reg_ralue_local_erif_set(payload, local_erif);
4080}
4081
4082static inline void
4083mlxsw_reg_ralue_act_ip2me_pack(char *payload)
4084{
4085 mlxsw_reg_ralue_action_type_set(payload,
4086 MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
4087}
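
/* Illustrative sketch (hypothetical helper): writing an IPv4 route whose
 * prefix is directly reachable through a local router interface. trap_id
 * is passed as 0 since it is reserved for the NOP trap action.
 */
static inline void
mlxsw_reg_ralue_example_write_local4(char *ralue_pl, u16 vr_id, u32 dip,
				     u8 prefix_len, u16 erif)
{
	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr_id,
			      prefix_len, dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl,
				       MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
				       erif);
}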
4088
/* RAUHT - Router Algorithmic LPM Unicast Host Table Register
4090 * ----------------------------------------------------------
4091 * The RAUHT register is used to configure and query the Unicast Host table in
4092 * devices that implement the Algorithmic LPM.
4093 */
4094#define MLXSW_REG_RAUHT_ID 0x8014
4095#define MLXSW_REG_RAUHT_LEN 0x74
4096
4097static const struct mlxsw_reg_info mlxsw_reg_rauht = {
4098 .id = MLXSW_REG_RAUHT_ID,
4099 .len = MLXSW_REG_RAUHT_LEN,
4100};
4101
4102enum mlxsw_reg_rauht_type {
4103 MLXSW_REG_RAUHT_TYPE_IPV4,
4104 MLXSW_REG_RAUHT_TYPE_IPV6,
4105};
4106
4107/* reg_rauht_type
4108 * Access: Index
4109 */
4110MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2);
4111
4112enum mlxsw_reg_rauht_op {
4113 MLXSW_REG_RAUHT_OP_QUERY_READ = 0,
4114 /* Read operation */
4115 MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1,
4116 /* Clear on read operation. Used to read entry and clear
4117 * activity bit.
4118 */
4119 MLXSW_REG_RAUHT_OP_WRITE_ADD = 0,
4120 /* Add. Used to write a new entry to the table. All R/W fields are
4121 * relevant for new entry. Activity bit is set for new entries.
4122 */
4123 MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1,
4124 /* Update action. Used to update an existing route entry and
4125 * only update the following fields:
4126 * trap_action, trap_id, mac, counter_set_type, counter_index
4127 */
4128 MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2,
4129 /* Clear activity. A bit is cleared for the entry. */
4130 MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3,
4131 /* Delete entry */
4132 MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4,
4133 /* Delete all host entries on a RIF. In this command, dip
4134 * field is reserved.
4135 */
4136};
4137
4138/* reg_rauht_op
4139 * Access: OP
4140 */
4141MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3);
4142
4143/* reg_rauht_a
4144 * Activity. Set for new entries. Set if a packet lookup has hit on
4145 * the specific entry.
4146 * To clear the a bit, use "clear activity" op.
4147 * Enabled by activity_dis in RGCR
4148 * Access: RO
4149 */
4150MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1);
4151
4152/* reg_rauht_rif
4153 * Router Interface
4154 * Access: Index
4155 */
4156MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16);
4157
4158/* reg_rauht_dip*
4159 * Destination address.
4160 * Access: Index
4161 */
4162MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32);
4163
4164enum mlxsw_reg_rauht_trap_action {
4165 MLXSW_REG_RAUHT_TRAP_ACTION_NOP,
4166 MLXSW_REG_RAUHT_TRAP_ACTION_TRAP,
4167 MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU,
4168 MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR,
4169 MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS,
4170};
4171
4172/* reg_rauht_trap_action
4173 * Access: RW
4174 */
4175MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4);
4176
4177enum mlxsw_reg_rauht_trap_id {
4178 MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0,
4179 MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1,
4180};
4181
4182/* reg_rauht_trap_id
4183 * Trap ID to be reported to CPU.
4184 * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
4185 * For trap_action of NOP, MIRROR and DISCARD_ERROR,
4186 * trap_id is reserved.
4187 * Access: RW
4188 */
4189MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9);
4190
4191/* reg_rauht_counter_set_type
4192 * Counter set type for flow counters
4193 * Access: RW
4194 */
4195MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8);
4196
4197/* reg_rauht_counter_index
4198 * Counter index for flow counters
4199 * Access: RW
4200 */
4201MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24);
4202
4203/* reg_rauht_mac
4204 * MAC address.
4205 * Access: RW
4206 */
4207MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6);
4208
4209static inline void mlxsw_reg_rauht_pack(char *payload,
4210 enum mlxsw_reg_rauht_op op, u16 rif,
4211 const char *mac)
4212{
4213 MLXSW_REG_ZERO(rauht, payload);
4214 mlxsw_reg_rauht_op_set(payload, op);
4215 mlxsw_reg_rauht_rif_set(payload, rif);
4216 mlxsw_reg_rauht_mac_memcpy_to(payload, mac);
4217}
4218
4219static inline void mlxsw_reg_rauht_pack4(char *payload,
4220 enum mlxsw_reg_rauht_op op, u16 rif,
4221 const char *mac, u32 dip)
4222{
4223 mlxsw_reg_rauht_pack(payload, op, rif, mac);
4224 mlxsw_reg_rauht_dip4_set(payload, dip);
4225}
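
/* Illustrative sketch (hypothetical helper): adding an IPv4 neighbour
 * entry behind a RIF. Deleting all neighbours of a RIF would use the
 * plain mlxsw_reg_rauht_pack() with MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
 * since dip is reserved for that operation.
 */
static inline void
mlxsw_reg_rauht_example_add_neigh4(char *rauht_pl, u16 rif, u32 dip,
				   const char *mac)
{
	mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD, rif,
			      mac, dip);
}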
4226
/* RALEU - Router Algorithmic LPM ECMP Update Register
4228 * ---------------------------------------------------
4229 * The register enables updating the ECMP section in the action for multiple
4230 * LPM Unicast entries in a single operation. The update is executed to
4231 * all entries of a {virtual router, protocol} tuple using the same ECMP group.
4232 */
4233#define MLXSW_REG_RALEU_ID 0x8015
4234#define MLXSW_REG_RALEU_LEN 0x28
4235
4236static const struct mlxsw_reg_info mlxsw_reg_raleu = {
4237 .id = MLXSW_REG_RALEU_ID,
4238 .len = MLXSW_REG_RALEU_LEN,
4239};
4240
4241/* reg_raleu_protocol
4242 * Protocol.
4243 * Access: Index
4244 */
4245MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4);
4246
4247/* reg_raleu_virtual_router
4248 * Virtual Router ID
4249 * Range is 0..cap_max_virtual_routers-1
4250 * Access: Index
4251 */
4252MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16);
4253
4254/* reg_raleu_adjacency_index
4255 * Adjacency Index used for matching on the existing entries.
4256 * Access: Index
4257 */
4258MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24);
4259
4260/* reg_raleu_ecmp_size
4261 * ECMP Size used for matching on the existing entries.
4262 * Access: Index
4263 */
4264MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13);
4265
4266/* reg_raleu_new_adjacency_index
4267 * New Adjacency Index.
4268 * Access: WO
4269 */
4270MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24);
4271
4272/* reg_raleu_new_ecmp_size
4273 * New ECMP Size.
4274 * Access: WO
4275 */
4276MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13);
4277
4278static inline void mlxsw_reg_raleu_pack(char *payload,
4279 enum mlxsw_reg_ralxx_protocol protocol,
4280 u16 virtual_router,
4281 u32 adjacency_index, u16 ecmp_size,
4282 u32 new_adjacency_index,
4283 u16 new_ecmp_size)
4284{
4285 MLXSW_REG_ZERO(raleu, payload);
4286 mlxsw_reg_raleu_protocol_set(payload, protocol);
4287 mlxsw_reg_raleu_virtual_router_set(payload, virtual_router);
4288 mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index);
4289 mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size);
4290 mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index);
4291 mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size);
4292}
4293
/* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register
4295 * ----------------------------------------------------------------
4296 * The RAUHTD register allows dumping entries from the Router Unicast Host
4297 * Table. For a given session an entry is dumped no more than one time. The
4298 * first RAUHTD access after reset is a new session. A session ends when the
4299 * num_rec response is smaller than num_rec request or for IPv4 when the
 * num_entries is smaller than 4. The clear activity affects the current
 * session, or the last session if a new session has not started.
4302 */
4303#define MLXSW_REG_RAUHTD_ID 0x8018
4304#define MLXSW_REG_RAUHTD_BASE_LEN 0x20
4305#define MLXSW_REG_RAUHTD_REC_LEN 0x20
4306#define MLXSW_REG_RAUHTD_REC_MAX_NUM 32
4307#define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \
4308 MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN)
4309#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4
4310
4311static const struct mlxsw_reg_info mlxsw_reg_rauhtd = {
4312 .id = MLXSW_REG_RAUHTD_ID,
4313 .len = MLXSW_REG_RAUHTD_LEN,
4314};
4315
4316#define MLXSW_REG_RAUHTD_FILTER_A BIT(0)
4317#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3)
4318
4319/* reg_rauhtd_filter_fields
4320 * if a bit is '0' then the relevant field is ignored and dump is done
4321 * regardless of the field value
4322 * Bit0 - filter by activity: entry_a
 * Bit3 - filter by entry rif: entry_rif
4324 * Access: Index
4325 */
4326MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8);
4327
4328enum mlxsw_reg_rauhtd_op {
4329 MLXSW_REG_RAUHTD_OP_DUMP,
4330 MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR,
4331};
4332
4333/* reg_rauhtd_op
4334 * Access: OP
4335 */
4336MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2);
4337
4338/* reg_rauhtd_num_rec
4339 * At request: number of records requested
4340 * At response: number of records dumped
4341 * For IPv4, each record has 4 entries at request and up to 4 entries
4342 * at response
4343 * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM
4344 * Access: Index
4345 */
4346MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8);
4347
4348/* reg_rauhtd_entry_a
4349 * Dump only if activity has value of entry_a
4350 * Reserved if filter_fields bit0 is '0'
4351 * Access: Index
4352 */
4353MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1);
4354
4355enum mlxsw_reg_rauhtd_type {
4356 MLXSW_REG_RAUHTD_TYPE_IPV4,
4357 MLXSW_REG_RAUHTD_TYPE_IPV6,
4358};
4359
4360/* reg_rauhtd_type
4361 * Dump only if record type is:
4362 * 0 - IPv4
4363 * 1 - IPv6
4364 * Access: Index
4365 */
4366MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4);
4367
4368/* reg_rauhtd_entry_rif
4369 * Dump only if RIF has value of entry_rif
4370 * Reserved if filter_fields bit3 is '0'
4371 * Access: Index
4372 */
4373MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16);
4374
4375static inline void mlxsw_reg_rauhtd_pack(char *payload,
4376 enum mlxsw_reg_rauhtd_type type)
4377{
4378 MLXSW_REG_ZERO(rauhtd, payload);
4379 mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A);
4380 mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR);
4381 mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM);
4382 mlxsw_reg_rauhtd_entry_a_set(payload, 1);
4383 mlxsw_reg_rauhtd_type_set(payload, type);
4384}
4385
4386/* reg_rauhtd_ipv4_rec_num_entries
4387 * Number of valid entries in this record:
4388 * 0 - 1 valid entry
4389 * 1 - 2 valid entries
4390 * 2 - 3 valid entries
4391 * 3 - 4 valid entries
4392 * Access: RO
4393 */
4394MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries,
4395 MLXSW_REG_RAUHTD_BASE_LEN, 28, 2,
4396 MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
4397
4398/* reg_rauhtd_rec_type
4399 * Record type.
4400 * 0 - IPv4
4401 * 1 - IPv6
4402 * Access: RO
4403 */
4404MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2,
4405 MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
4406
4407#define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8
4408
4409/* reg_rauhtd_ipv4_ent_a
4410 * Activity. Set for new entries. Set if a packet lookup has hit on the
4411 * specific entry.
4412 * Access: RO
4413 */
4414MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1,
4415 MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
4416
4417/* reg_rauhtd_ipv4_ent_rif
4418 * Router interface.
4419 * Access: RO
4420 */
4421MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0,
4422 16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
4423
4424/* reg_rauhtd_ipv4_ent_dip
4425 * Destination IPv4 address.
4426 * Access: RO
4427 */
4428MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0,
4429 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false);
4430
4431static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload,
4432 int ent_index, u16 *p_rif,
4433 u32 *p_dip)
4434{
4435 *p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index);
4436 *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index);
4437}
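
/* Illustrative sketch (hypothetical helper): walking the IPv4 entries of a
 * dump response. Each record carries up to four IPv4 entries; the
 * num_entries field is biased by one (0 means one valid entry).
 */
static inline void mlxsw_reg_rauhtd_example_walk_ipv4(char *rauhtd_pl)
{
	int num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	int i, j;

	for (i = 0; i < num_rec; i++) {
		int num_entries;

		num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
									i) + 1;
		for (j = 0; j < num_entries; j++) {
			int ent_index = i * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + j;
			u16 rif;
			u32 dip;

			mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index,
							 &rif, &dip);
			/* {rif, dip} describe one dumped host entry */
		}
	}
}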
4438
/* MFCR - Management Fan Control Register
4440 * --------------------------------------
4441 * This register controls the settings of the Fan Speed PWM mechanism.
4442 */
4443#define MLXSW_REG_MFCR_ID 0x9001
4444#define MLXSW_REG_MFCR_LEN 0x08
4445
4446static const struct mlxsw_reg_info mlxsw_reg_mfcr = {
4447 .id = MLXSW_REG_MFCR_ID,
4448 .len = MLXSW_REG_MFCR_LEN,
4449};
4450
4451enum mlxsw_reg_mfcr_pwm_frequency {
4452 MLXSW_REG_MFCR_PWM_FEQ_11HZ = 0x00,
4453 MLXSW_REG_MFCR_PWM_FEQ_14_7HZ = 0x01,
4454 MLXSW_REG_MFCR_PWM_FEQ_22_1HZ = 0x02,
4455 MLXSW_REG_MFCR_PWM_FEQ_1_4KHZ = 0x40,
4456 MLXSW_REG_MFCR_PWM_FEQ_5KHZ = 0x41,
4457 MLXSW_REG_MFCR_PWM_FEQ_20KHZ = 0x42,
4458 MLXSW_REG_MFCR_PWM_FEQ_22_5KHZ = 0x43,
4459 MLXSW_REG_MFCR_PWM_FEQ_25KHZ = 0x44,
4460};
4461
4462/* reg_mfcr_pwm_frequency
4463 * Controls the frequency of the PWM signal.
4464 * Access: RW
4465 */
4466MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 6);
4467
4468#define MLXSW_MFCR_TACHOS_MAX 10
4469
4470/* reg_mfcr_tacho_active
 * Indicates which of the tachometers are active (bit per tachometer).
4472 * Access: RO
4473 */
4474MLXSW_ITEM32(reg, mfcr, tacho_active, 0x04, 16, MLXSW_MFCR_TACHOS_MAX);
4475
4476#define MLXSW_MFCR_PWMS_MAX 5
4477
4478/* reg_mfcr_pwm_active
 * Indicates which of the PWM controls are active (bit per PWM).
4480 * Access: RO
4481 */
4482MLXSW_ITEM32(reg, mfcr, pwm_active, 0x04, 0, MLXSW_MFCR_PWMS_MAX);
4483
4484static inline void
4485mlxsw_reg_mfcr_pack(char *payload,
4486 enum mlxsw_reg_mfcr_pwm_frequency pwm_frequency)
4487{
4488 MLXSW_REG_ZERO(mfcr, payload);
4489 mlxsw_reg_mfcr_pwm_frequency_set(payload, pwm_frequency);
4490}
4491
4492static inline void
4493mlxsw_reg_mfcr_unpack(char *payload,
4494 enum mlxsw_reg_mfcr_pwm_frequency *p_pwm_frequency,
4495 u16 *p_tacho_active, u8 *p_pwm_active)
4496{
4497 *p_pwm_frequency = mlxsw_reg_mfcr_pwm_frequency_get(payload);
4498 *p_tacho_active = mlxsw_reg_mfcr_tacho_active_get(payload);
4499 *p_pwm_active = mlxsw_reg_mfcr_pwm_active_get(payload);
4500}
4501
4502/* MFSC - Management Fan Speed Control Register
4503 * --------------------------------------------
4504 * This register controls the settings of the Fan Speed PWM mechanism.
4505 */
4506#define MLXSW_REG_MFSC_ID 0x9002
4507#define MLXSW_REG_MFSC_LEN 0x08
4508
4509static const struct mlxsw_reg_info mlxsw_reg_mfsc = {
4510 .id = MLXSW_REG_MFSC_ID,
4511 .len = MLXSW_REG_MFSC_LEN,
4512};
4513
4514/* reg_mfsc_pwm
4515 * Fan pwm to control / monitor.
4516 * Access: Index
4517 */
4518MLXSW_ITEM32(reg, mfsc, pwm, 0x00, 24, 3);
4519
4520/* reg_mfsc_pwm_duty_cycle
 * Controls the duty cycle of the PWM. The value ranges from 0..255,
 * representing a duty cycle of 0%..100%.
4523 * Access: RW
4524 */
4525MLXSW_ITEM32(reg, mfsc, pwm_duty_cycle, 0x04, 0, 8);
4526
4527static inline void mlxsw_reg_mfsc_pack(char *payload, u8 pwm,
4528 u8 pwm_duty_cycle)
4529{
4530 MLXSW_REG_ZERO(mfsc, payload);
4531 mlxsw_reg_mfsc_pwm_set(payload, pwm);
4532 mlxsw_reg_mfsc_pwm_duty_cycle_set(payload, pwm_duty_cycle);
4533}
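
/* Illustrative sketch (hypothetical helper): the duty cycle field is
 * linear, so a percentage can simply be scaled into the 0..255 range.
 */
static inline void mlxsw_reg_mfsc_example_set_percent(char *mfsc_pl, u8 pwm,
						      u8 percent)
{
	mlxsw_reg_mfsc_pack(mfsc_pl, pwm, percent * 255 / 100);
}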
4534
4535/* MFSM - Management Fan Speed Measurement
4536 * ---------------------------------------
4537 * This register controls the settings of the Tacho measurements and
4538 * enables reading the Tachometer measurements.
4539 */
4540#define MLXSW_REG_MFSM_ID 0x9003
4541#define MLXSW_REG_MFSM_LEN 0x08
4542
4543static const struct mlxsw_reg_info mlxsw_reg_mfsm = {
4544 .id = MLXSW_REG_MFSM_ID,
4545 .len = MLXSW_REG_MFSM_LEN,
4546};
4547
4548/* reg_mfsm_tacho
4549 * Fan tachometer index.
4550 * Access: Index
4551 */
4552MLXSW_ITEM32(reg, mfsm, tacho, 0x00, 24, 4);
4553
4554/* reg_mfsm_rpm
 * Fan speed (revolutions per minute).
4556 * Access: RO
4557 */
4558MLXSW_ITEM32(reg, mfsm, rpm, 0x04, 0, 16);
4559
4560static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho)
4561{
4562 MLXSW_REG_ZERO(mfsm, payload);
4563 mlxsw_reg_mfsm_tacho_set(payload, tacho);
4564}
4565
/* MTCAP - Management Temperature Capabilities
4567 * -------------------------------------------
4568 * This register exposes the capabilities of the device and
4569 * system temperature sensing.
4570 */
4571#define MLXSW_REG_MTCAP_ID 0x9009
4572#define MLXSW_REG_MTCAP_LEN 0x08
4573
4574static const struct mlxsw_reg_info mlxsw_reg_mtcap = {
4575 .id = MLXSW_REG_MTCAP_ID,
4576 .len = MLXSW_REG_MTCAP_LEN,
4577};
4578
4579/* reg_mtcap_sensor_count
4580 * Number of sensors supported by the device.
 * This includes the QSFP module sensors (if present in the QSFP module).
4582 * Access: RO
4583 */
4584MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
4585
4586/* MTMP - Management Temperature
4587 * -----------------------------
4588 * This register controls the settings of the temperature measurements
4589 * and enables reading the temperature measurements. Note that temperature
4590 * is in 0.125 degrees Celsius.
4591 */
4592#define MLXSW_REG_MTMP_ID 0x900A
4593#define MLXSW_REG_MTMP_LEN 0x20
4594
4595static const struct mlxsw_reg_info mlxsw_reg_mtmp = {
4596 .id = MLXSW_REG_MTMP_ID,
4597 .len = MLXSW_REG_MTMP_LEN,
4598};
4599
4600/* reg_mtmp_sensor_index
4601 * Sensors index to access.
4602 * 64-127 of sensor_index are mapped to the SFP+/QSFP modules sequentially
4603 * (module 0 is mapped to sensor_index 64).
4604 * Access: Index
4605 */
4606MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 7);
4607
4608/* Convert to milli degrees Celsius */
#define MLXSW_REG_MTMP_TEMP_TO_MC(val) ((val) * 125)
4610
4611/* reg_mtmp_temperature
4612 * Temperature reading from the sensor. Reading is in 0.125 Celsius
4613 * degrees units.
4614 * Access: RO
4615 */
4616MLXSW_ITEM32(reg, mtmp, temperature, 0x04, 0, 16);
4617
4618/* reg_mtmp_mte
4619 * Max Temperature Enable - enables measuring the max temperature on a sensor.
4620 * Access: RW
4621 */
4622MLXSW_ITEM32(reg, mtmp, mte, 0x08, 31, 1);
4623
4624/* reg_mtmp_mtr
4625 * Max Temperature Reset - clears the value of the max temperature register.
4626 * Access: WO
4627 */
4628MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1);
4629
4630/* reg_mtmp_max_temperature
4631 * The highest measured temperature from the sensor.
4632 * When the bit mte is cleared, the field max_temperature is reserved.
4633 * Access: RO
4634 */
4635MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16);
4636
4637#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8
4638
4639/* reg_mtmp_sensor_name
4640 * Sensor Name
4641 * Access: RO
4642 */
4643MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE);
4644
4645static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index,
4646 bool max_temp_enable,
4647 bool max_temp_reset)
4648{
4649 MLXSW_REG_ZERO(mtmp, payload);
4650 mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
4651 mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
4652 mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
4653}
4654
4655static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
4656 unsigned int *p_max_temp,
4657 char *sensor_name)
4658{
4659 u16 temp;
4660
4661 if (p_temp) {
4662 temp = mlxsw_reg_mtmp_temperature_get(payload);
4663 *p_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
4664 }
4665 if (p_max_temp) {
		temp = mlxsw_reg_mtmp_max_temperature_get(payload);
		*p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
4668 }
4669 if (sensor_name)
4670 mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
4671}
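
/* Illustrative sketch (hypothetical helper): a query is prepared with
 * mlxsw_reg_mtmp_pack() and, once the register has been read back into the
 * same buffer, decoded into milli-degrees Celsius with
 * mlxsw_reg_mtmp_unpack().
 */
static inline void mlxsw_reg_mtmp_example_prepare(char *mtmp_pl,
						  u8 sensor_index)
{
	/* Keep max temperature tracking enabled, do not reset it. */
	mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, false);
}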
4672
/* MPAT - Monitoring Port Analyzer Table
4674 * -------------------------------------
 * MPAT Register is used to query and configure the Switch Port Analyzer Table.
4676 * For an enabled analyzer, all fields except e (enable) cannot be modified.
4677 */
4678#define MLXSW_REG_MPAT_ID 0x901A
4679#define MLXSW_REG_MPAT_LEN 0x78
4680
4681static const struct mlxsw_reg_info mlxsw_reg_mpat = {
4682 .id = MLXSW_REG_MPAT_ID,
4683 .len = MLXSW_REG_MPAT_LEN,
4684};
4685
4686/* reg_mpat_pa_id
4687 * Port Analyzer ID.
4688 * Access: Index
4689 */
4690MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4);
4691
4692/* reg_mpat_system_port
4693 * A unique port identifier for the final destination of the packet.
4694 * Access: RW
4695 */
4696MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16);
4697
4698/* reg_mpat_e
4699 * Enable. Indicating the Port Analyzer is enabled.
4700 * Access: RW
4701 */
4702MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1);
4703
4704/* reg_mpat_qos
4705 * Quality Of Service Mode.
4706 * 0: CONFIGURED - QoS parameters (Switch Priority, and encapsulation
4707 * PCP, DEI, DSCP or VL) are configured.
4708 * 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the
4709 * same as in the original packet that has triggered the mirroring. For
4710 * SPAN also the pcp,dei are maintained.
4711 * Access: RW
4712 */
4713MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
4714
/* reg_mpat_be
 * Best effort mode. Indicates that mirroring traffic should not cause packet
 * drop or back pressure, but will discard the mirrored packets. Mirrored
 * packets will be forwarded in a best-effort manner.
4719 * 0: Do not discard mirrored packets
4720 * 1: Discard mirrored packets if causing congestion
4721 * Access: RW
4722 */
4723MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
4724
static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
				       u16 system_port, bool e)
{
	MLXSW_REG_ZERO(mpat, payload);
	mlxsw_reg_mpat_pa_id_set(payload, pa_id);
	mlxsw_reg_mpat_system_port_set(payload, system_port);
	mlxsw_reg_mpat_e_set(payload, e);
	mlxsw_reg_mpat_qos_set(payload, 1);
	mlxsw_reg_mpat_be_set(payload, 1);
}
4735
4736/* MPAR - Monitoring Port Analyzer Register
4737 * ----------------------------------------
4738 * MPAR register is used to query and configure the port analyzer port mirroring
4739 * properties.
4740 */
4741#define MLXSW_REG_MPAR_ID 0x901B
4742#define MLXSW_REG_MPAR_LEN 0x08
4743
4744static const struct mlxsw_reg_info mlxsw_reg_mpar = {
4745 .id = MLXSW_REG_MPAR_ID,
4746 .len = MLXSW_REG_MPAR_LEN,
4747};
4748
4749/* reg_mpar_local_port
4750 * The local port to mirror the packets from.
4751 * Access: Index
4752 */
4753MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8);
4754
4755enum mlxsw_reg_mpar_i_e {
4756 MLXSW_REG_MPAR_TYPE_EGRESS,
4757 MLXSW_REG_MPAR_TYPE_INGRESS,
4758};
4759
4760/* reg_mpar_i_e
4761 * Ingress/Egress
4762 * Access: Index
4763 */
4764MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4);
4765
4766/* reg_mpar_enable
4767 * Enable mirroring
4768 * By default, port mirroring is disabled for all ports.
4769 * Access: RW
4770 */
4771MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1);
4772
4773/* reg_mpar_pa_id
4774 * Port Analyzer ID.
4775 * Access: RW
4776 */
4777MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
4778
4779static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
4780 enum mlxsw_reg_mpar_i_e i_e,
4781 bool enable, u8 pa_id)
4782{
4783 MLXSW_REG_ZERO(mpar, payload);
4784 mlxsw_reg_mpar_local_port_set(payload, local_port);
4785 mlxsw_reg_mpar_enable_set(payload, enable);
4786 mlxsw_reg_mpar_i_e_set(payload, i_e);
4787 mlxsw_reg_mpar_pa_id_set(payload, pa_id);
}
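
/* Illustrative sketch (hypothetical helper): mirroring the ingress traffic
 * of one port requires two registers - MPAT to enable the analyzer towards
 * the destination port and MPAR to bind the mirrored port to it. The two
 * payloads are written separately.
 */
static inline void
mlxsw_reg_example_span_ingress(char *mpat_pl, char *mpar_pl, u8 pa_id,
			       u8 from_local_port, u16 to_system_port)
{
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, to_system_port, true);
	mlxsw_reg_mpar_pack(mpar_pl, from_local_port,
			    MLXSW_REG_MPAR_TYPE_INGRESS, true, pa_id);
}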
4789
/* MLCR - Management LED Control Register
4791 * --------------------------------------
4792 * Controls the system LEDs.
4793 */
4794#define MLXSW_REG_MLCR_ID 0x902B
4795#define MLXSW_REG_MLCR_LEN 0x0C
4796
4797static const struct mlxsw_reg_info mlxsw_reg_mlcr = {
4798 .id = MLXSW_REG_MLCR_ID,
4799 .len = MLXSW_REG_MLCR_LEN,
4800};
4801
4802/* reg_mlcr_local_port
4803 * Local port number.
4804 * Access: RW
4805 */
4806MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8);
4807
4808#define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF
4809
4810/* reg_mlcr_beacon_duration
4811 * Duration of the beacon to be active, in seconds.
4812 * 0x0 - Will turn off the beacon.
4813 * 0xFFFF - Will turn on the beacon until explicitly turned off.
4814 * Access: RW
4815 */
4816MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16);
4817
4818/* reg_mlcr_beacon_remain
4819 * Remaining duration of the beacon, in seconds.
4820 * 0xFFFF indicates an infinite amount of time.
4821 * Access: RO
4822 */
4823MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16);
4824
4825static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
4826 bool active)
4827{
4828 MLXSW_REG_ZERO(mlcr, payload);
4829 mlxsw_reg_mlcr_local_port_set(payload, local_port);
4830 mlxsw_reg_mlcr_beacon_duration_set(payload, active ?
4831 MLXSW_REG_MLCR_DURATION_MAX : 0);
4832}
4833
/* SBPR - Shared Buffer Pools Register
4835 * -----------------------------------
 * The SBPR register configures and retrieves the shared buffer pools and
 * their configuration.
4837 */
4838#define MLXSW_REG_SBPR_ID 0xB001
4839#define MLXSW_REG_SBPR_LEN 0x14
4840
4841static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
4842 .id = MLXSW_REG_SBPR_ID,
4843 .len = MLXSW_REG_SBPR_LEN,
4844};
4845
/* shared direction enum for SBPR, SBCM, SBPM */
4847enum mlxsw_reg_sbxx_dir {
4848 MLXSW_REG_SBXX_DIR_INGRESS,
4849 MLXSW_REG_SBXX_DIR_EGRESS,
};
4851
4852/* reg_sbpr_dir
4853 * Direction.
4854 * Access: Index
4855 */
4856MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2);
4857
4858/* reg_sbpr_pool
4859 * Pool index.
4860 * Access: Index
4861 */
4862MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
4863
4864/* reg_sbpr_size
4865 * Pool size in buffer cells.
4866 * Access: RW
4867 */
4868MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24);
4869
4870enum mlxsw_reg_sbpr_mode {
4871 MLXSW_REG_SBPR_MODE_STATIC,
4872 MLXSW_REG_SBPR_MODE_DYNAMIC,
4873};
4874
4875/* reg_sbpr_mode
4876 * Pool quota calculation mode.
4877 * Access: RW
4878 */
4879MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
4880
4881static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
				       enum mlxsw_reg_sbxx_dir dir,
				       enum mlxsw_reg_sbpr_mode mode, u32 size)
4884{
4885 MLXSW_REG_ZERO(sbpr, payload);
4886 mlxsw_reg_sbpr_pool_set(payload, pool);
4887 mlxsw_reg_sbpr_dir_set(payload, dir);
4888 mlxsw_reg_sbpr_mode_set(payload, mode);
4889 mlxsw_reg_sbpr_size_set(payload, size);
4890}
4891
4892/* SBCM - Shared Buffer Class Management Register
4893 * ----------------------------------------------
4894 * The SBCM register configures and retrieves the shared buffer allocation
4895 * and configuration according to Port-PG, including the binding to pool
4896 * and definition of the associated quota.
4897 */
4898#define MLXSW_REG_SBCM_ID 0xB002
4899#define MLXSW_REG_SBCM_LEN 0x28
4900
4901static const struct mlxsw_reg_info mlxsw_reg_sbcm = {
4902 .id = MLXSW_REG_SBCM_ID,
4903 .len = MLXSW_REG_SBCM_LEN,
4904};
4905
4906/* reg_sbcm_local_port
4907 * Local port number.
4908 * For Ingress: excludes CPU port and Router port
4909 * For Egress: excludes IP Router
4910 * Access: Index
4911 */
4912MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
4913
4914/* reg_sbcm_pg_buff
4915 * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
4916 * For PG buffer: range is 0..cap_max_pg_buffers - 1
4917 * For traffic class: range is 0..cap_max_tclass - 1
4918 * Note that when traffic class is in MC aware mode then the traffic
4919 * classes which are MC aware cannot be configured.
4920 * Access: Index
4921 */
4922MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
4923
/* reg_sbcm_dir
4925 * Direction.
4926 * Access: Index
4927 */
4928MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
4929
4930/* reg_sbcm_min_buff
4931 * Minimum buffer size for the limiter, in cells.
4932 * Access: RW
4933 */
4934MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
4935
/* shared max_buff limits for dynamic threshold for SBCM, SBPM */
4937#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
4938#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
4939
/* reg_sbcm_max_buff
4941 * When the pool associated to the port-pg/tclass is configured to
4942 * static, Maximum buffer size for the limiter configured in cells.
4943 * When the pool associated to the port-pg/tclass is configured to
4944 * dynamic, the max_buff holds the "alpha" parameter, supporting
4945 * the following values:
4946 * 0: 0
4947 * i: (1/128)*2^(i-1), for i=1..14
4948 * 0xFF: Infinity
4949 * Access: RW
4950 */
4951MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
4952
4953/* reg_sbcm_pool
4954 * Association of the port-priority to a pool.
4955 * Access: RW
4956 */
4957MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
4958
4959static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
				       enum mlxsw_reg_sbxx_dir dir,
				       u32 min_buff, u32 max_buff, u8 pool)
4962{
4963 MLXSW_REG_ZERO(sbcm, payload);
4964 mlxsw_reg_sbcm_local_port_set(payload, local_port);
4965 mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff);
4966 mlxsw_reg_sbcm_dir_set(payload, dir);
4967 mlxsw_reg_sbcm_min_buff_set(payload, min_buff);
4968 mlxsw_reg_sbcm_max_buff_set(payload, max_buff);
4969 mlxsw_reg_sbcm_pool_set(payload, pool);
4970}
4971
/* SBPM - Shared Buffer Port Management Register
 * ---------------------------------------------
 * The SBPM register configures and retrieves the shared buffer allocation
4975 * and configuration according to Port-Pool, including the definition
4976 * of the associated quota.
4977 */
4978#define MLXSW_REG_SBPM_ID 0xB003
4979#define MLXSW_REG_SBPM_LEN 0x28
4980
4981static const struct mlxsw_reg_info mlxsw_reg_sbpm = {
4982 .id = MLXSW_REG_SBPM_ID,
4983 .len = MLXSW_REG_SBPM_LEN,
4984};
4985
4986/* reg_sbpm_local_port
4987 * Local port number.
4988 * For Ingress: excludes CPU port and Router port
4989 * For Egress: excludes IP Router
4990 * Access: Index
4991 */
4992MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
4993
4994/* reg_sbpm_pool
4995 * The pool associated to quota counting on the local_port.
4996 * Access: Index
4997 */
4998MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
4999
/* reg_sbpm_dir
5001 * Direction.
5002 * Access: Index
5003 */
5004MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
5005
/* reg_sbpm_buff_occupancy
5007 * Current buffer occupancy in cells.
5008 * Access: RO
5009 */
5010MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24);
5011
5012/* reg_sbpm_clr
5013 * Clear Max Buffer Occupancy
5014 * When this bit is set, max_buff_occupancy field is cleared (and a
5015 * new max value is tracked from the time the clear was performed).
5016 * Access: OP
5017 */
5018MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1);
5019
5020/* reg_sbpm_max_buff_occupancy
5021 * Maximum value of buffer occupancy in cells monitored. Cleared by
5022 * writing to the clr field.
5023 * Access: RO
5024 */
5025MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24);
5026
Jiri Pirkoe0594362015-10-16 14:01:31 +02005027/* reg_sbpm_min_buff
5028 * Minimum buffer size for the limiter, in cells.
5029 * Access: RW
5030 */
5031MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
5032
5033/* reg_sbpm_max_buff
5034 * When the pool associated with the port-pg/tclass is configured as
5035 * static, max_buff is the maximum buffer size for the limiter, in cells.
5036 * When the pool associated with the port-pg/tclass is configured as
5037 * dynamic, the max_buff holds the "alpha" parameter, supporting
5038 * the following values:
5039 * 0: 0
5040 * i: (1/128)*2^(i-1), for i=1..14
5041 * 0xFF: Infinity
5042 * Access: RW
5043 */
5044MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
5045
5046static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
Jiri Pirko42a7f1d2016-04-14 18:19:27 +02005047 enum mlxsw_reg_sbxx_dir dir, bool clr,
Jiri Pirkoe0594362015-10-16 14:01:31 +02005048 u32 min_buff, u32 max_buff)
5049{
5050 MLXSW_REG_ZERO(sbpm, payload);
5051 mlxsw_reg_sbpm_local_port_set(payload, local_port);
5052 mlxsw_reg_sbpm_pool_set(payload, pool);
5053 mlxsw_reg_sbpm_dir_set(payload, dir);
Jiri Pirko42a7f1d2016-04-14 18:19:27 +02005054 mlxsw_reg_sbpm_clr_set(payload, clr);
Jiri Pirkoe0594362015-10-16 14:01:31 +02005055 mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
5056 mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
5057}
5058
Jiri Pirko42a7f1d2016-04-14 18:19:27 +02005059static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
5060 u32 *p_max_buff_occupancy)
5061{
5062 *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload);
5063 *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload);
5064}
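/* Usage sketch (hedged, not taken from the driver): reading the current and
 * maximum buffer occupancy of a port/pool quota on the ingress side.
 * mlxsw_example_sbpm_occ_read() is a hypothetical helper; mlxsw_reg_query()
 * and struct mlxsw_core are assumed to be provided by the mlxsw core.
 * Passing clr=true instead would also reset the tracked maximum.
 */
static inline int mlxsw_example_sbpm_occ_read(struct mlxsw_core *mlxsw_core,
					      u8 local_port, u8 pool,
					      u32 *p_cur, u32 *p_max)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool,
			    MLXSW_REG_SBXX_DIR_INGRESS, false /* clr */,
			    0 /* min_buff */, 0 /* max_buff */);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;
	mlxsw_reg_sbpm_unpack(sbpm_pl, p_cur, p_max);
	return 0;
}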
5065
Jiri Pirkoe0594362015-10-16 14:01:31 +02005066/* SBMM - Shared Buffer Multicast Management Register
5067 * --------------------------------------------------
5068 * The SBMM register configures and retrieves the shared buffer allocation
5069 * and configuration for MC packets according to Switch-Priority, including
5070 * the binding to pool and definition of the associated quota.
5071 */
5072#define MLXSW_REG_SBMM_ID 0xB004
5073#define MLXSW_REG_SBMM_LEN 0x28
5074
5075static const struct mlxsw_reg_info mlxsw_reg_sbmm = {
5076 .id = MLXSW_REG_SBMM_ID,
5077 .len = MLXSW_REG_SBMM_LEN,
5078};
5079
5080/* reg_sbmm_prio
5081 * Switch Priority.
5082 * Access: Index
5083 */
5084MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4);
5085
5086/* reg_sbmm_min_buff
5087 * Minimum buffer size for the limiter, in cells.
5088 * Access: RW
5089 */
5090MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24);
5091
5092/* reg_sbmm_max_buff
5093 * When the pool associated with the port-pg/tclass is configured as
5094 * static, max_buff is the maximum buffer size for the limiter, in cells.
5095 * When the pool associated with the port-pg/tclass is configured as
5096 * dynamic, the max_buff holds the "alpha" parameter, supporting
5097 * the following values:
5098 * 0: 0
5099 * i: (1/128)*2^(i-1), for i=1..14
5100 * 0xFF: Infinity
5101 * Access: RW
5102 */
5103MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24);
5104
5105/* reg_sbmm_pool
5106 * Association of the port-priority to a pool.
5107 * Access: RW
5108 */
5109MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
5110
5111static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
5112 u32 max_buff, u8 pool)
5113{
5114 MLXSW_REG_ZERO(sbmm, payload);
5115 mlxsw_reg_sbmm_prio_set(payload, prio);
5116 mlxsw_reg_sbmm_min_buff_set(payload, min_buff);
5117 mlxsw_reg_sbmm_max_buff_set(payload, max_buff);
5118 mlxsw_reg_sbmm_pool_set(payload, pool);
5119}
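/* Usage sketch (hedged, not taken from the driver): binding multicast traffic
 * at a given switch priority to pool 0 with the largest dynamic threshold
 * (alpha index 14). mlxsw_example_sbmm_write() is hypothetical;
 * mlxsw_reg_write() is assumed to come from the mlxsw core.
 */
static inline int mlxsw_example_sbmm_write(struct mlxsw_core *mlxsw_core,
					   u8 prio)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];

	mlxsw_reg_sbmm_pack(sbmm_pl, prio, 0 /* min_buff */,
			    MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX, 0 /* pool */);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbmm), sbmm_pl);
}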
5120
Jiri Pirko26176de2016-04-14 18:19:26 +02005121/* SBSR - Shared Buffer Status Register
5122 * ------------------------------------
5123 * The SBSR register retrieves the shared buffer occupancy according to
5124 * Port-Pool. Note that this register enables reading a large amount of data.
5125 * It is the user's responsibility to limit the amount of data to ensure the
5126 * response fits within the maximum transfer unit. If the response exceeds
5127 * the maximum transfer unit, it is truncated with no special notice.
5128 */
5129#define MLXSW_REG_SBSR_ID 0xB005
5130#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */
5131#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */
5132#define MLXSW_REG_SBSR_REC_MAX_COUNT 120
5133#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \
5134 MLXSW_REG_SBSR_REC_LEN * \
5135 MLXSW_REG_SBSR_REC_MAX_COUNT)
5136
5137static const struct mlxsw_reg_info mlxsw_reg_sbsr = {
5138 .id = MLXSW_REG_SBSR_ID,
5139 .len = MLXSW_REG_SBSR_LEN,
5140};
5141
5142/* reg_sbsr_clr
5143 * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
5144 * field is cleared (and a new max value is tracked from the time the clear
5145 * was performed).
5146 * Access: OP
5147 */
5148MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
5149
5150/* reg_sbsr_ingress_port_mask
5151 * Bit vector for all ingress network ports.
5152 * Indicates which of the ports (for which the relevant bit is set)
5153 * are affected by the set operation. Configuration of any other port
5154 * does not change.
5155 * Access: Index
5156 */
5157MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1);
5158
5159/* reg_sbsr_pg_buff_mask
5160 * Bit vector for all switch priority groups.
5161 * Indicates which of the priorities (for which the relevant bit is set)
5162 * are affected by the set operation. Configuration of any other priority
5163 * does not change.
5164 * Range is 0..cap_max_pg_buffers - 1
5165 * Access: Index
5166 */
5167MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1);
5168
5169/* reg_sbsr_egress_port_mask
5170 * Bit vector for all egress network ports.
5171 * Indicates which of the ports (for which the relevant bit is set)
5172 * are affected by the set operation. Configuration of any other port
5173 * does not change.
5174 * Access: Index
5175 */
5176MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1);
5177
5178/* reg_sbsr_tclass_mask
5179 * Bit vector for all traffic classes.
5180 * Indicates which of the traffic classes (for which the relevant bit is
5181 * set) are affected by the set operation. Configuration of any other
5182 * traffic class does not change.
5183 * Range is 0..cap_max_tclass - 1
5184 * Access: Index
5185 */
5186MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1);
5187
5188static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr)
5189{
5190 MLXSW_REG_ZERO(sbsr, payload);
5191 mlxsw_reg_sbsr_clr_set(payload, clr);
5192}
5193
5194/* reg_sbsr_rec_buff_occupancy
5195 * Current buffer occupancy in cells.
5196 * Access: RO
5197 */
5198MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
5199 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false);
5200
5201/* reg_sbsr_rec_max_buff_occupancy
5202 * Maximum value of buffer occupancy in cells monitored. Cleared by
5203 * writing to the clr field.
5204 * Access: RO
5205 */
5206MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
5207 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false);
5208
5209static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
5210 u32 *p_buff_occupancy,
5211 u32 *p_max_buff_occupancy)
5212{
5213 *p_buff_occupancy =
5214 mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index);
5215 *p_max_buff_occupancy =
5216 mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
5217}
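/* Usage sketch (hedged, not taken from the driver): querying the occupancy
 * record for a single ingress port and a single PG and unpacking the first
 * returned record. mlxsw_example_sbsr_port_pg_occ() is hypothetical;
 * mlxsw_reg_query() is assumed to come from the mlxsw core, kmalloc() from
 * <linux/slab.h> (the SBSR payload is too large for the stack), and the
 * sketch assumes that selecting exactly one port/PG pair yields its record
 * at index 0.
 */
static inline int mlxsw_example_sbsr_port_pg_occ(struct mlxsw_core *mlxsw_core,
						 u8 local_port, u8 pg,
						 u32 *p_cur, u32 *p_max)
{
	char *sbsr_pl;
	int err;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;
	mlxsw_reg_sbsr_pack(sbsr_pl, false /* clr */);
	mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
	mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, pg, 1);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl);
	if (!err)
		mlxsw_reg_sbsr_rec_unpack(sbsr_pl, 0, p_cur, p_max);
	kfree(sbsr_pl);
	return err;
}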
5218
Yotam Gigi51ae8cc2016-07-21 12:03:13 +02005219/* SBIB - Shared Buffer Internal Buffer Register
5220 * ---------------------------------------------
5221 * The SBIB register configures per-port buffers for internal use. The internal
5222 * buffers consume memory on the port buffers (note that the port buffers are
5223 * also used by PBMC).
5224 *
5225 * For Spectrum this is used for egress mirroring.
5226 */
5227#define MLXSW_REG_SBIB_ID 0xB006
5228#define MLXSW_REG_SBIB_LEN 0x10
5229
5230static const struct mlxsw_reg_info mlxsw_reg_sbib = {
5231 .id = MLXSW_REG_SBIB_ID,
5232 .len = MLXSW_REG_SBIB_LEN,
5233};
5234
5235/* reg_sbib_local_port
5236 * Local port number
5237 * Not supported for CPU port and router port
5238 * Access: Index
5239 */
5240MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
5241
5242/* reg_sbib_buff_size
5243 * Units represented in cells
5244 * Allowed range is 0 to (cap_max_headroom_size - 1)
5245 * Default is 0
5246 * Access: RW
5247 */
5248MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
5249
5250static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
5251 u32 buff_size)
5252{
5253 MLXSW_REG_ZERO(sbib, payload);
5254 mlxsw_reg_sbib_local_port_set(payload, local_port);
5255 mlxsw_reg_sbib_buff_size_set(payload, buff_size);
5256}
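/* Usage sketch (hedged, not taken from the driver): reserving an internal
 * buffer of buff_size cells on a port, e.g. for egress mirroring; writing a
 * size of 0 releases it again. mlxsw_example_sbib_set() is hypothetical;
 * mlxsw_reg_write() is assumed to come from the mlxsw core.
 */
static inline int mlxsw_example_sbib_set(struct mlxsw_core *mlxsw_core,
					 u8 local_port, u32 buff_size)
{
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	mlxsw_reg_sbib_pack(sbib_pl, local_port, buff_size);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbib), sbib_pl);
}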
5257
Ido Schimmel4ec14b72015-07-29 23:33:48 +02005258static inline const char *mlxsw_reg_id_str(u16 reg_id)
5259{
5260 switch (reg_id) {
5261 case MLXSW_REG_SGCR_ID:
5262 return "SGCR";
5263 case MLXSW_REG_SPAD_ID:
5264 return "SPAD";
Elad Razfabe5482016-01-10 21:06:25 +01005265 case MLXSW_REG_SMID_ID:
5266 return "SMID";
Ido Schimmele61011b2015-08-06 16:41:53 +02005267 case MLXSW_REG_SSPR_ID:
5268 return "SSPR";
Jiri Pirkoe534a56a2015-10-16 14:01:35 +02005269 case MLXSW_REG_SFDAT_ID:
5270 return "SFDAT";
Jiri Pirko236033b2015-10-16 14:01:28 +02005271 case MLXSW_REG_SFD_ID:
5272 return "SFD";
Jiri Pirkof5d88f52015-10-16 14:01:29 +02005273 case MLXSW_REG_SFN_ID:
5274 return "SFN";
Ido Schimmel4ec14b72015-07-29 23:33:48 +02005275 case MLXSW_REG_SPMS_ID:
5276 return "SPMS";
Elad Razb2e345f2015-10-16 14:01:30 +02005277 case MLXSW_REG_SPVID_ID:
5278 return "SPVID";
5279 case MLXSW_REG_SPVM_ID:
5280 return "SPVM";
Ido Schimmel148f4722016-02-18 11:30:01 +01005281 case MLXSW_REG_SPAFT_ID:
5282 return "SPAFT";
Ido Schimmel4ec14b72015-07-29 23:33:48 +02005283 case MLXSW_REG_SFGC_ID:
5284 return "SFGC";
5285 case MLXSW_REG_SFTR_ID:
5286 return "SFTR";
Ido Schimmel41933272016-01-27 15:20:17 +01005287 case MLXSW_REG_SFDF_ID:
5288 return "SFDF";
Jiri Pirkod1d40be2015-12-03 12:12:25 +01005289 case MLXSW_REG_SLDR_ID:
5290 return "SLDR";
5291 case MLXSW_REG_SLCR_ID:
5292 return "SLCR";
5293 case MLXSW_REG_SLCOR_ID:
5294 return "SLCOR";
Ido Schimmel4ec14b72015-07-29 23:33:48 +02005295 case MLXSW_REG_SPMLR_ID:
5296 return "SPMLR";
Ido Schimmel64790232015-10-16 14:01:33 +02005297 case MLXSW_REG_SVFA_ID:
5298 return "SVFA";
Ido Schimmel1f65da72015-10-16 14:01:34 +02005299 case MLXSW_REG_SVPE_ID:
5300 return "SVPE";
Ido Schimmelf1fb6932015-10-16 14:01:32 +02005301 case MLXSW_REG_SFMR_ID:
5302 return "SFMR";
Ido Schimmela4feea72015-10-16 14:01:36 +02005303 case MLXSW_REG_SPVMLR_ID:
5304 return "SPVMLR";
Ido Schimmel2c63a552016-04-06 17:10:07 +02005305 case MLXSW_REG_QTCT_ID:
5306 return "QTCT";
Ido Schimmelb9b7cee2016-04-06 17:10:06 +02005307 case MLXSW_REG_QEEC_ID:
5308 return "QEEC";
Ido Schimmel4ec14b72015-07-29 23:33:48 +02005309 case MLXSW_REG_PMLP_ID:
5310 return "PMLP";
5311 case MLXSW_REG_PMTU_ID:
5312 return "PMTU";
5313 case MLXSW_REG_PTYS_ID:
5314 return "PTYS";
5315 case MLXSW_REG_PPAD_ID:
5316 return "PPAD";
5317 case MLXSW_REG_PAOS_ID:
5318 return "PAOS";
Ido Schimmel6f253d82016-04-06 17:10:12 +02005319 case MLXSW_REG_PFCC_ID:
5320 return "PFCC";
Ido Schimmel4ec14b72015-07-29 23:33:48 +02005321 case MLXSW_REG_PPCNT_ID:
5322 return "PPCNT";
Ido Schimmelb98ff152016-04-06 17:10:00 +02005323 case MLXSW_REG_PPTB_ID:
5324 return "PPTB";
Jiri Pirkoe0594362015-10-16 14:01:31 +02005325 case MLXSW_REG_PBMC_ID:
5326 return "PBMC";
Ido Schimmel4ec14b72015-07-29 23:33:48 +02005327 case MLXSW_REG_PSPA_ID:
5328 return "PSPA";
5329 case MLXSW_REG_HTGT_ID:
5330 return "HTGT";
5331 case MLXSW_REG_HPKT_ID:
5332 return "HPKT";
Ido Schimmel69c407a2016-07-02 11:00:13 +02005333 case MLXSW_REG_RGCR_ID:
5334 return "RGCR";
Ido Schimmel3dc26682016-07-02 11:00:18 +02005335 case MLXSW_REG_RITR_ID:
5336 return "RITR";
Yotam Gigi089f9812016-07-05 11:27:48 +02005337 case MLXSW_REG_RATR_ID:
5338 return "RATR";
Jiri Pirko6f9fc3c2016-07-04 08:23:05 +02005339 case MLXSW_REG_RALTA_ID:
5340 return "RALTA";
Jiri Pirkoa9823352016-07-04 08:23:06 +02005341 case MLXSW_REG_RALST_ID:
5342 return "RALST";
Jiri Pirko20ae4052016-07-04 08:23:07 +02005343 case MLXSW_REG_RALTB_ID:
5344 return "RALTB";
Jiri Pirkod5a1c742016-07-04 08:23:10 +02005345 case MLXSW_REG_RALUE_ID:
5346 return "RALUE";
Yotam Gigi4457b3df2016-07-05 11:27:40 +02005347 case MLXSW_REG_RAUHT_ID:
5348 return "RAUHT";
Jiri Pirkoa59f0b32016-07-05 11:27:49 +02005349 case MLXSW_REG_RALEU_ID:
5350 return "RALEU";
Yotam Gigi7cf2c202016-07-05 11:27:41 +02005351 case MLXSW_REG_RAUHTD_ID:
5352 return "RAUHTD";
Jiri Pirko5246f2e2015-11-27 13:45:58 +01005353 case MLXSW_REG_MFCR_ID:
5354 return "MFCR";
5355 case MLXSW_REG_MFSC_ID:
5356 return "MFSC";
5357 case MLXSW_REG_MFSM_ID:
5358 return "MFSM";
Jiri Pirko85926f82015-11-27 13:45:56 +01005359 case MLXSW_REG_MTCAP_ID:
5360 return "MTCAP";
Yotam Gigi43a46852016-07-21 12:03:14 +02005361 case MLXSW_REG_MPAT_ID:
5362 return "MPAT";
Yotam Gigi23019052016-07-21 12:03:15 +02005363 case MLXSW_REG_MPAR_ID:
5364 return "MPAR";
Jiri Pirko85926f82015-11-27 13:45:56 +01005365 case MLXSW_REG_MTMP_ID:
5366 return "MTMP";
Ido Schimmel3161c152015-11-27 13:45:54 +01005367 case MLXSW_REG_MLCR_ID:
5368 return "MLCR";
Jiri Pirkoe0594362015-10-16 14:01:31 +02005369 case MLXSW_REG_SBPR_ID:
5370 return "SBPR";
5371 case MLXSW_REG_SBCM_ID:
5372 return "SBCM";
5373 case MLXSW_REG_SBPM_ID:
5374 return "SBPM";
5375 case MLXSW_REG_SBMM_ID:
5376 return "SBMM";
Jiri Pirko26176de2016-04-14 18:19:26 +02005377 case MLXSW_REG_SBSR_ID:
5378 return "SBSR";
Yotam Gigi51ae8cc2016-07-21 12:03:13 +02005379 case MLXSW_REG_SBIB_ID:
5380 return "SBIB";
Ido Schimmel4ec14b72015-07-29 23:33:48 +02005381 default:
5382 return "*UNKNOWN*";
5383 }
5384}
5385
5386/* PUDE - Port Up / Down Event
5387 * ---------------------------
5388 * Reports the operational state change of a port.
5389 */
5390#define MLXSW_REG_PUDE_LEN 0x10
5391
5392/* reg_pude_swid
5393 * Switch partition ID with which to associate the port.
5394 * Access: Index
5395 */
5396MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
5397
5398/* reg_pude_local_port
5399 * Local port number.
5400 * Access: Index
5401 */
5402MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
5403
5404/* reg_pude_admin_status
5405 * Port administrative state (the desired state).
5406 * 1 - Up.
5407 * 2 - Down.
5408 * 3 - Up once. This means that in case of link failure, the port won't go
5409 * into polling mode, but will wait to be re-enabled by software.
5410 * 4 - Disabled by system. Can only be set by hardware.
5411 * Access: RO
5412 */
5413MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
5414
5415/* reg_pude_oper_status
5416 * Port operational state.
5417 * 1 - Up.
5418 * 2 - Down.
5419 * 3 - Down by port failure. This means that the device will not bring the
5420 * port up again until explicitly instructed to do so by software.
5421 * Access: RO
5422 */
5423MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
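/* Usage sketch (hedged, not taken from the driver): decoding a PUDE event
 * payload delivered by the device, e.g. inside an event listener.
 * mlxsw_example_pude_port_is_up() is hypothetical; the value 1 for "up"
 * follows the oper_status documentation above.
 */
static inline bool mlxsw_example_pude_port_is_up(const char *pude_pl,
						 u8 *p_local_port)
{
	*p_local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	return mlxsw_reg_pude_oper_status_get(pude_pl) == 1; /* 1 - Up */
}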
5424
5425#endif