/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

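/* Per-protocol UDF programming: which User Defined Field slices to extract
 * from the packet, plus the mask applied to the L3 framing, IP protocol and
 * fragment bits (CORE_CFP_MASK_PORT(6)).
 */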
struct cfp_udf_layout {
        u8 slices[UDF_NUM_SLICES];
        u32 mask_value;
};

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
        .slices = {
                /* End of L2, byte offset 12, src IP[0:15] */
                CFG_UDF_EOL2 | 6,
                /* End of L2, byte offset 14, src IP[16:31] */
                CFG_UDF_EOL2 | 7,
                /* End of L2, byte offset 16, dst IP[0:15] */
                CFG_UDF_EOL2 | 8,
                /* End of L2, byte offset 18, dst IP[16:31] */
                CFG_UDF_EOL2 | 9,
                /* End of L3, byte offset 0, src port */
                CFG_UDF_EOL3 | 0,
                /* End of L3, byte offset 2, dst port */
                CFG_UDF_EOL3 | 1,
                0, 0, 0
        },
        .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
};

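/* Return the number of slices actually programmed in a layout, i.e. the
 * non-zero entries of the slices[] array.
 */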
static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
        unsigned int i, count = 0;

        for (i = 0; i < UDF_NUM_SLICES; i++) {
                if (layout[i] != 0)
                        count++;
        }

        return count;
}

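/* Program the UDF offset registers of the given slice with this layout */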
static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
                                unsigned int slice_num,
                                const u8 *layout)
{
        u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
        unsigned int i;

        for (i = 0; i < UDF_NUM_SLICES; i++)
                core_writel(priv, layout[i], offset + i * 4);
}

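/* Start a CFP RAM operation (TCAM, action policy or rate meter access) and
 * poll for the self-clearing OP_STR_DONE bit.
 */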
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
        unsigned int timeout = 1000;
        u32 reg;

        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
        reg |= OP_STR_DONE | op;
        core_writel(priv, reg, CORE_CFP_ACC);

        do {
                reg = core_readl(priv, CORE_CFP_ACC);
                if (!(reg & OP_STR_DONE))
                        break;

                cpu_relax();
        } while (timeout--);

        if (!timeout)
                return -ETIMEDOUT;

        return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
                                             unsigned int addr)
{
        u32 reg;

        WARN_ON(addr >= CFP_NUM_RULES);

        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
        reg |= addr << XCESS_ADDR_SHIFT;
        core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
        /* Entry #0 is reserved */
        return CFP_NUM_RULES - 1;
}

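/* Install an ethtool Rx flow specification into the CFP: program the UDF
 * layout, write the TCAM entry, then configure the action policy and rate
 * meter RAM entries for the selected rule index.
 */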
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
                                struct ethtool_rx_flow_spec *fs)
{
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        struct ethtool_tcpip4_spec *v4_spec;
        const struct cfp_udf_layout *layout;
        unsigned int slice_num, rule_index;
        unsigned int queue_num, port_num;
        u8 ip_proto, ip_frag;
        u8 num_udf;
        u32 reg;
        int ret;

        /* Check for unsupported extensions */
        if ((fs->flow_type & FLOW_EXT) &&
            (fs->m_ext.vlan_etype || fs->m_ext.data[1]))
                return -EINVAL;

        if (fs->location != RX_CLS_LOC_ANY &&
            test_bit(fs->location, priv->cfp.used))
                return -EBUSY;

        if (fs->location != RX_CLS_LOC_ANY &&
            fs->location > bcm_sf2_cfp_rule_size(priv))
                return -EINVAL;

        ip_frag = be32_to_cpu(fs->m_ext.data[0]);

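        /* The ring_cookie encodes the destination: port = cookie / 8,
         * queue = cookie % 8.
         */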

        /* We do not support discarding packets, check that the
         * destination port is enabled and that we are within the
         * number of ports supported by the switch
         */
        port_num = fs->ring_cookie / 8;

        if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
            !(BIT(port_num) & ds->enabled_port_mask) ||
            port_num >= priv->hw_params.num_ports)
                return -EINVAL;

        switch (fs->flow_type & ~FLOW_EXT) {
        case TCP_V4_FLOW:
                ip_proto = IPPROTO_TCP;
                v4_spec = &fs->h_u.tcp_ip4_spec;
                break;
        case UDP_V4_FLOW:
                ip_proto = IPPROTO_UDP;
                v4_spec = &fs->h_u.udp_ip4_spec;
                break;
        default:
                return -EINVAL;
        }

        /* We only use one UDF slice for now */
        slice_num = 1;
        layout = &udf_tcpip4_layout;
        num_udf = bcm_sf2_get_num_udf_slices(layout->slices);

        /* Apply the UDF layout for this filter */
        bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);

        /* Apply to all packets received through this port */
        core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

        /* S-Tag status   [31:30]
         * C-Tag status   [29:28]
         * L2 framing     [27:26]
         * L3 framing     [25:24]
         * IP ToS         [23:16]
         * IP proto       [15:08]
         * IP Fragm       [7]
         * Non 1st frag   [6]
         * IP Authen      [5]
         * TTL range      [4:3]
         * PPPoE session  [2]
         * Reserved       [1]
         * UDF_Valid[8]   [0]
         */
        core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7,
                    CORE_CFP_DATA_PORT(6));

        /* UDF_Valid[7:0] [31:24]
         * S-Tag          [23:8]
         * C-Tag          [7:0]
         */
        core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));

        /* C-Tag          [31:24]
         * UDF_n_A8       [23:8]
         * UDF_n_A7       [7:0]
         */
        core_writel(priv, 0, CORE_CFP_DATA_PORT(4));

        /* UDF_n_A7       [31:24]
         * UDF_n_A6       [23:8]
         * UDF_n_A5       [7:0]
         */
        core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
                    CORE_CFP_DATA_PORT(3));

        /* UDF_n_A5       [31:24]
         * UDF_n_A4       [23:8]
         * UDF_n_A3       [7:0]
         */
        reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
              (u32)be16_to_cpu(v4_spec->psrc) << 8 |
              (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(2));

        /* UDF_n_A3       [31:24]
         * UDF_n_A2       [23:8]
         * UDF_n_A1       [7:0]
         */
        reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
              (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
              (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(1));

        /* UDF_n_A1       [31:24]
         * UDF_n_A0       [23:8]
         * Reserved       [7:4]
         * Slice ID       [3:2]
         * Slice valid    [1:0]
         */
        reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
              (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
              SLICE_NUM(slice_num) | SLICE_VALID;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

        /* Source port map match */
        core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

        /* Mask with the specific layout for IPv4 packets */
        core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));

        /* Mask all but valid UDFs */
        core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));

        /* Mask all */
        core_writel(priv, 0, CORE_CFP_MASK_PORT(4));

        /* All other UDFs should be matched with the filter */
        core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
        core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
        core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
        core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));

        /* Locate the first rule available */
        if (fs->location == RX_CLS_LOC_ANY)
                rule_index = find_first_zero_bit(priv->cfp.used,
                                                 bcm_sf2_cfp_rule_size(priv));
        else
                rule_index = fs->location;

        /* Insert into TCAM now */
        bcm_sf2_cfp_rule_addr_set(priv, rule_index);

        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
        if (ret) {
                pr_err("TCAM entry at addr %d failed\n", rule_index);
                return ret;
        }

        /* Replace ARL derived destination with DST_MAP derived, define
         * which port and queue this should be forwarded to.
         *
         * We have a small oddity where Port 6 just does not have a
         * valid bit here (so we subtract by one).
         */
        queue_num = fs->ring_cookie % 8;
        if (port_num >= 7)
                port_num -= 1;

        reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
              CHANGE_TC | queue_num << NEW_TC_SHIFT;

        core_writel(priv, reg, CORE_ACT_POL_DATA0);

        /* Set classification ID that needs to be put in Broadcom tag */
        core_writel(priv, rule_index << CHAIN_ID_SHIFT,
                    CORE_ACT_POL_DATA1);

        core_writel(priv, 0, CORE_ACT_POL_DATA2);

        /* Configure policer RAM now */
        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
        if (ret) {
                pr_err("Policer entry at %d failed\n", rule_index);
                return ret;
        }

        /* Disable the policer */
        core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

        /* Now the rate meter */
        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
        if (ret) {
                pr_err("Meter entry at %d failed\n", rule_index);
                return ret;
        }

        /* Turn on CFP for this rule now */
        reg = core_readl(priv, CORE_CFP_CTL_REG);
        reg |= BIT(port);
        core_writel(priv, reg, CORE_CFP_CTL_REG);

        /* Flag the rule as being used and return it */
        set_bit(rule_index, priv->cfp.used);
        fs->location = rule_index;

        return 0;
}

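/* Delete a rule by reading back its TCAM entry, clearing the SLICE_VALID
 * bits and writing the entry back.
 */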
static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
                                u32 loc)
{
        int ret;
        u32 reg;

        /* Refuse deletion of unused rules, and the default reserved rule */
        if (!test_bit(loc, priv->cfp.used) || loc == 0)
                return -EINVAL;

        /* Indicate which rule we want to read */
        bcm_sf2_cfp_rule_addr_set(priv, loc);

        ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
        if (ret)
                return ret;

        /* Clear its valid bits */
        reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
        reg &= ~SLICE_VALID;
        core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

        /* Write back this entry into the TCAM now */
        ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
        if (ret)
                return ret;

        clear_bit(loc, priv->cfp.used);

        return 0;
}

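/* Invert every mask field in the flow spec; bcm_sf2_cfp_rule_get() builds
 * exact-match masks and flips them here before reporting to user space.
 */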
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
        unsigned int i;

        for (i = 0; i < sizeof(flow->m_u); i++)
                flow->m_u.hdata[i] ^= 0xff;

        flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
        flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
        flow->m_ext.data[0] ^= cpu_to_be32(~0);
        flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

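/* Read back a single rule: fetch the action policy RAM and TCAM entry for
 * nfc->fs.location (or, during a search, use the entry the search operation
 * has already loaded) and reconstruct the ethtool flow specification from
 * the raw words.
 */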
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
                                struct ethtool_rxnfc *nfc, bool search)
{
        struct ethtool_tcpip4_spec *v4_spec;
        unsigned int queue_num;
        u16 src_dst_port;
        u32 reg, ipv4;
        int ret;

        if (!search) {
                bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

                ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
                if (ret)
                        return ret;

                reg = core_readl(priv, CORE_ACT_POL_DATA0);

                ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
                if (ret)
                        return ret;
        } else {
                reg = core_readl(priv, CORE_ACT_POL_DATA0);
        }

        /* Extract the destination port */
        nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
                                  DST_MAP_IB_MASK) - 1;

        /* There is no Port 6, so we compensate for that here */
        if (nfc->fs.ring_cookie >= 6)
                nfc->fs.ring_cookie++;
        nfc->fs.ring_cookie *= 8;

        /* Extract the destination queue */
        queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
        nfc->fs.ring_cookie += queue_num;

        /* Extract the IP protocol */
        reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
        switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
        case IPPROTO_TCP:
                nfc->fs.flow_type = TCP_V4_FLOW;
                v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
                break;
        case IPPROTO_UDP:
                nfc->fs.flow_type = UDP_V4_FLOW;
                v4_spec = &nfc->fs.h_u.udp_ip4_spec;
                break;
        default:
                /* Clear to exit the search process */
                if (search)
                        core_readl(priv, CORE_CFP_DATA_PORT(7));
                return -EINVAL;
        }

        v4_spec->tos = (reg >> 16) & 0xff;
        nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1);

        reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
        /* dst port [15:8] */
        src_dst_port = reg << 8;

        reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
        /* dst port [7:0] */
        src_dst_port |= (reg >> 24);

        v4_spec->pdst = cpu_to_be16(src_dst_port);
        nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
        v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
        nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

        /* IPv4 dst [15:8] */
        ipv4 = (reg & 0xff) << 8;
        reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
        /* IPv4 dst [31:16] */
        ipv4 |= ((reg >> 8) & 0xffff) << 16;
        /* IPv4 dst [7:0] */
        ipv4 |= (reg >> 24) & 0xff;
        v4_spec->ip4dst = cpu_to_be32(ipv4);
        nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

        /* IPv4 src [15:8] */
        ipv4 = (reg & 0xff) << 8;
        reg = core_readl(priv, CORE_CFP_DATA_PORT(0));

        if (!(reg & SLICE_VALID))
                return -EINVAL;

        /* IPv4 src [7:0] */
        ipv4 |= (reg >> 24) & 0xff;
        /* IPv4 src [31:16] */
        ipv4 |= ((reg >> 8) & 0xffff) << 16;
        v4_spec->ip4src = cpu_to_be32(ipv4);
        nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

        /* Read last to avoid next entry clobbering the results during search
         * operations
         */
        reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
        if (!(reg & 1 << port))
                return -EINVAL;

        bcm_sf2_invert_masks(&nfc->fs);

        /* Put the TCAM size here */
        nfc->data = bcm_sf2_cfp_rule_size(priv);

        return 0;
}

/* We implement the search using a TCAM search operation */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
                                    int port, struct ethtool_rxnfc *nfc,
                                    u32 *rule_locs)
{
        unsigned int index = 1, rules_cnt = 0;
        int ret;
        u32 reg;

        /* Do not poll on OP_STR_DONE here: for a search operation it only
         * self-clears once the entire search is over, so we cannot use
         * bcm_sf2_cfp_op() which waits for that bit to clear.
         */
        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
        reg |= index << XCESS_ADDR_SHIFT;
        reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
        reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
        core_writel(priv, reg, CORE_CFP_ACC);

        do {
                /* Wait for results to be ready */
                reg = core_readl(priv, CORE_CFP_ACC);

                /* Extract the address we are searching */
                index = reg >> XCESS_ADDR_SHIFT;
                index &= XCESS_ADDR_MASK;

                /* We have a valid search result, so flag it accordingly */
                if (reg & SEARCH_STS) {
                        ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
                        if (ret)
                                continue;

                        rule_locs[rules_cnt] = index;
                        rules_cnt++;
                }

                /* Search is over, break out */
                if (!(reg & OP_STR_DONE))
                        break;

        } while (index < CFP_NUM_RULES);

        /* Put the TCAM size here */
        nfc->data = bcm_sf2_cfp_rule_size(priv);
        nfc->rule_cnt = rules_cnt;

        return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
                      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        int ret = 0;

        mutex_lock(&priv->cfp.lock);

        switch (nfc->cmd) {
        case ETHTOOL_GRXCLSRLCNT:
                /* Subtract the default, unusable rule */
                nfc->rule_cnt = bitmap_weight(priv->cfp.used,
                                              CFP_NUM_RULES) - 1;
                /* We support specifying rule locations */
                nfc->data |= RX_CLS_LOC_SPECIAL;
                break;
        case ETHTOOL_GRXCLSRULE:
                ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
                break;
        case ETHTOOL_GRXCLSRLALL:
                ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        mutex_unlock(&priv->cfp.lock);

        return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
                      struct ethtool_rxnfc *nfc)
{
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        int ret = 0;

        mutex_lock(&priv->cfp.lock);

        switch (nfc->cmd) {
        case ETHTOOL_SRXCLSRLINS:
                ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        mutex_unlock(&priv->cfp.lock);

        return ret;
}

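/* Reset the CFP TCAM and poll for the self-clearing TCAM_RESET bit */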
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
        unsigned int timeout = 1000;
        u32 reg;

        reg = core_readl(priv, CORE_CFP_ACC);
        reg |= TCAM_RESET;
        core_writel(priv, reg, CORE_CFP_ACC);

        do {
                reg = core_readl(priv, CORE_CFP_ACC);
                if (!(reg & TCAM_RESET))
                        break;

                cpu_relax();
        } while (timeout--);

        if (!timeout)
                return -ETIMEDOUT;

        return 0;
}