Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 1 | /* |
| 2 | * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c |
| 3 | * Copyright (c) 2017 Mellanox Technologies. All rights reserved. |
| 4 | * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> |
| 5 | * |
| 6 | * Redistribution and use in source and binary forms, with or without |
| 7 | * modification, are permitted provided that the following conditions are met: |
| 8 | * |
| 9 | * 1. Redistributions of source code must retain the above copyright |
| 10 | * notice, this list of conditions and the following disclaimer. |
| 11 | * 2. Redistributions in binary form must reproduce the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer in the |
| 13 | * documentation and/or other materials provided with the distribution. |
| 14 | * 3. Neither the names of the copyright holders nor the names of its |
| 15 | * contributors may be used to endorse or promote products derived from |
| 16 | * this software without specific prior written permission. |
| 17 | * |
| 18 | * Alternatively, this software may be distributed under the terms of the |
| 19 | * GNU General Public License ("GPL") version 2 as published by the Free |
| 20 | * Software Foundation. |
| 21 | * |
| 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
| 26 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 32 | * POSSIBILITY OF SUCH DAMAGE. |
| 33 | */ |
| 34 | |
| 35 | #include <linux/kernel.h> |
| 36 | #include <linux/slab.h> |
| 37 | #include <linux/errno.h> |
| 38 | #include <linux/list.h> |
| 39 | #include <linux/string.h> |
| 40 | #include <linux/rhashtable.h> |
| 41 | #include <linux/netdevice.h> |
Petr Machata | a150201 | 2017-03-09 09:25:19 +0100 | [diff] [blame] | 42 | #include <net/tc_act/tc_vlan.h> |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 43 | |
| 44 | #include "reg.h" |
| 45 | #include "core.h" |
| 46 | #include "resources.h" |
| 47 | #include "spectrum.h" |
| 48 | #include "core_acl_flex_keys.h" |
| 49 | #include "core_acl_flex_actions.h" |
| 50 | #include "spectrum_acl_flex_keys.h" |
| 51 | |
| 52 | struct mlxsw_sp_acl { |
Arkadi Sharshevsky | 446a154 | 2017-03-11 09:42:56 +0100 | [diff] [blame] | 53 | struct mlxsw_sp *mlxsw_sp; |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 54 | struct mlxsw_afk *afk; |
| 55 | struct mlxsw_afa *afa; |
Ido Schimmel | a110748 | 2017-05-26 08:37:39 +0200 | [diff] [blame] | 56 | struct mlxsw_sp_fid *dummy_fid; |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 57 | const struct mlxsw_sp_acl_ops *ops; |
| 58 | struct rhashtable ruleset_ht; |
Arkadi Sharshevsky | 096e914 | 2017-03-11 09:42:55 +0100 | [diff] [blame] | 59 | struct list_head rules; |
Arkadi Sharshevsky | 446a154 | 2017-03-11 09:42:56 +0100 | [diff] [blame] | 60 | struct { |
| 61 | struct delayed_work dw; |
| 62 | unsigned long interval; /* ms */ |
| 63 | #define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000 |
| 64 | } rule_activity_update; |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 65 | unsigned long priv[0]; |
| 66 | /* priv has to be always the last item */ |
| 67 | }; |
| 68 | |
/* Accessor for the flexible-key dictionary owned by the ACL state. */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}
| 73 | |
/* Hash key identifying a bound ruleset: the netdevice, the direction
 * and the profile ops together are unique.
 */
struct mlxsw_sp_acl_ruleset_ht_key {
	struct net_device *dev; /* dev this ruleset is bound to */
	bool ingress;
	const struct mlxsw_sp_acl_profile_ops *ops;
};
| 79 | |
| 80 | struct mlxsw_sp_acl_ruleset { |
| 81 | struct rhash_head ht_node; /* Member of acl HT */ |
| 82 | struct mlxsw_sp_acl_ruleset_ht_key ht_key; |
| 83 | struct rhashtable rule_ht; |
| 84 | unsigned int ref_count; |
| 85 | unsigned long priv[0]; |
| 86 | /* priv has to be always the last item */ |
| 87 | }; |
| 88 | |
| 89 | struct mlxsw_sp_acl_rule { |
| 90 | struct rhash_head ht_node; /* Member of rule HT */ |
Arkadi Sharshevsky | 096e914 | 2017-03-11 09:42:55 +0100 | [diff] [blame] | 91 | struct list_head list; |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 92 | unsigned long cookie; /* HT key */ |
| 93 | struct mlxsw_sp_acl_ruleset *ruleset; |
| 94 | struct mlxsw_sp_acl_rule_info *rulei; |
Arkadi Sharshevsky | 446a154 | 2017-03-11 09:42:56 +0100 | [diff] [blame] | 95 | u64 last_used; |
Arkadi Sharshevsky | 7c1b8eb | 2017-03-11 09:42:59 +0100 | [diff] [blame] | 96 | u64 last_packets; |
| 97 | u64 last_bytes; |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 98 | unsigned long priv[0]; |
| 99 | /* priv has to be always the last item */ |
| 100 | }; |
| 101 | |
/* Hashtable parameters for the ruleset table: full ht_key struct as key. */
static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};
| 108 | |
/* Hashtable parameters for the per-ruleset rule table: cookie as key. */
static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};
| 115 | |
/* Accessor for the dummy FID created during ACL init. */
struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}
| 120 | |
/* Allocate a ruleset with profile-sized private area, initialize its
 * rule hashtable and let the profile ops set up the backing resources.
 * Returns the ruleset with ref_count 1, or ERR_PTR() on failure.
 */
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	/* priv area size is dictated by the chosen profile ops */
	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
	if (err)
		goto err_ops_ruleset_add;

	return ruleset;

err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}
| 153 | |
/* Tear down a ruleset in reverse order of creation: profile resources,
 * rule hashtable, then the allocation itself.
 */
static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}
| 163 | |
| 164 | static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, |
| 165 | struct mlxsw_sp_acl_ruleset *ruleset, |
| 166 | struct net_device *dev, bool ingress) |
| 167 | { |
| 168 | const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; |
| 169 | struct mlxsw_sp_acl *acl = mlxsw_sp->acl; |
| 170 | int err; |
| 171 | |
| 172 | ruleset->ht_key.dev = dev; |
| 173 | ruleset->ht_key.ingress = ingress; |
| 174 | err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, |
| 175 | mlxsw_sp_acl_ruleset_ht_params); |
| 176 | if (err) |
| 177 | return err; |
| 178 | err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); |
| 179 | if (err) |
| 180 | goto err_ops_ruleset_bind; |
| 181 | return 0; |
| 182 | |
| 183 | err_ops_ruleset_bind: |
| 184 | rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, |
| 185 | mlxsw_sp_acl_ruleset_ht_params); |
| 186 | return err; |
| 187 | } |
| 188 | |
/* Reverse of mlxsw_sp_acl_ruleset_bind(): unbind in hardware first,
 * then remove from the lookup table.
 */
static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
}
| 199 | |
/* Take a reference on a ruleset. Callers run under RTNL, so no
 * atomicity is needed here.
 */
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}
| 204 | |
| 205 | static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, |
| 206 | struct mlxsw_sp_acl_ruleset *ruleset) |
| 207 | { |
| 208 | if (--ruleset->ref_count) |
| 209 | return; |
| 210 | mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset); |
| 211 | mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); |
| 212 | } |
| 213 | |
/* Get-or-create entry point: look up a ruleset bound to (dev, ingress,
 * profile) and take a reference, or create and bind a fresh one.
 * Returns ERR_PTR() on failure; pair with mlxsw_sp_acl_ruleset_put().
 */
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct net_device *dev, bool ingress,
			 enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct mlxsw_sp_acl_ruleset *ruleset;
	int err;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	/* Zero the key: it is hashed as raw memory, so padding must be
	 * deterministic.
	 */
	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.dev = dev;
	ht_key.ingress = ingress;
	ht_key.ops = ops;
	ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
					 mlxsw_sp_acl_ruleset_ht_params);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	/* Not found - create (ref_count starts at 1) and bind. */
	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
	if (IS_ERR(ruleset))
		return ruleset;
	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
	if (err)
		goto err_ruleset_bind;
	return ruleset;

err_ruleset_bind:
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}
| 251 | |
/* Release a reference obtained via mlxsw_sp_acl_ruleset_get(). */
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
| 257 | |
Arkadi Sharshevsky | 4817072 | 2017-03-11 09:42:58 +0100 | [diff] [blame] | 258 | static int |
| 259 | mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp, |
| 260 | struct mlxsw_sp_acl_rule_info *rulei) |
| 261 | { |
| 262 | int err; |
| 263 | |
| 264 | err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index); |
| 265 | if (err) |
| 266 | return err; |
| 267 | rulei->counter_valid = true; |
| 268 | return 0; |
| 269 | } |
| 270 | |
/* Invalidate and release the rule's hardware flow counter. */
static void
mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei)
{
	rulei->counter_valid = false;
	mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}
| 278 | |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 279 | struct mlxsw_sp_acl_rule_info * |
| 280 | mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) |
| 281 | { |
| 282 | struct mlxsw_sp_acl_rule_info *rulei; |
| 283 | int err; |
| 284 | |
| 285 | rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); |
| 286 | if (!rulei) |
| 287 | return NULL; |
| 288 | rulei->act_block = mlxsw_afa_block_create(acl->afa); |
| 289 | if (IS_ERR(rulei->act_block)) { |
| 290 | err = PTR_ERR(rulei->act_block); |
| 291 | goto err_afa_block_create; |
| 292 | } |
| 293 | return rulei; |
| 294 | |
| 295 | err_afa_block_create: |
| 296 | kfree(rulei); |
| 297 | return ERR_PTR(err); |
| 298 | } |
| 299 | |
/* Free rule info and its action block. */
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}
| 305 | |
/* Finalize the action block once all actions have been appended. */
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}
| 310 | |
/* Record the rule's priority in the rule info. */
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}
| 316 | |
/* Add a 32-bit key/mask pair for the given flex-key element. */
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}
| 324 | |
/* Add a byte-buffer key/mask pair (e.g. MAC/IP address) of length len
 * for the given flex-key element.
 */
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}
| 333 | |
/* Set the action block to continue to the next lookup. */
void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_continue(rulei->act_block);
}
| 338 | |
/* Set the action block to jump to the given ACL group. */
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id)
{
	mlxsw_afa_block_jump(rulei->act_block, group_id);
}
| 344 | |
/* Append a drop action; returns 0 or a negative errno. */
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}
| 349 | |
/* Append a trap-to-CPU action; returns 0 or a negative errno. */
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block);
}
| 354 | |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 355 | int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, |
| 356 | struct mlxsw_sp_acl_rule_info *rulei, |
| 357 | struct net_device *out_dev) |
| 358 | { |
| 359 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 360 | u8 local_port; |
| 361 | bool in_port; |
| 362 | |
| 363 | if (out_dev) { |
| 364 | if (!mlxsw_sp_port_dev_check(out_dev)) |
| 365 | return -EINVAL; |
| 366 | mlxsw_sp_port = netdev_priv(out_dev); |
| 367 | if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) |
| 368 | return -EINVAL; |
| 369 | local_port = mlxsw_sp_port->local_port; |
| 370 | in_port = false; |
| 371 | } else { |
Petr Machata | 4bb51bd | 2017-07-31 09:27:23 +0200 | [diff] [blame] | 372 | /* If out_dev is NULL, the caller wants to |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 373 | * set forward to ingress port. |
| 374 | */ |
| 375 | local_port = 0; |
| 376 | in_port = true; |
| 377 | } |
| 378 | return mlxsw_afa_block_append_fwd(rulei->act_block, |
| 379 | local_port, in_port); |
| 380 | } |
| 381 | |
Petr Machata | a150201 | 2017-03-09 09:25:19 +0100 | [diff] [blame] | 382 | int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, |
| 383 | struct mlxsw_sp_acl_rule_info *rulei, |
| 384 | u32 action, u16 vid, u16 proto, u8 prio) |
| 385 | { |
| 386 | u8 ethertype; |
| 387 | |
| 388 | if (action == TCA_VLAN_ACT_MODIFY) { |
| 389 | switch (proto) { |
| 390 | case ETH_P_8021Q: |
| 391 | ethertype = 0; |
| 392 | break; |
| 393 | case ETH_P_8021AD: |
| 394 | ethertype = 1; |
| 395 | break; |
| 396 | default: |
| 397 | dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n", |
| 398 | proto); |
| 399 | return -EINVAL; |
| 400 | } |
| 401 | |
| 402 | return mlxsw_afa_block_append_vlan_modify(rulei->act_block, |
| 403 | vid, prio, ethertype); |
| 404 | } else { |
| 405 | dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n"); |
| 406 | return -EINVAL; |
| 407 | } |
| 408 | } |
| 409 | |
/* Append a counter action using the rule's pre-allocated counter.
 * mlxsw_sp is unused here but kept for signature symmetry with the
 * other act helpers.
 */
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      rulei->counter_index);
}
| 416 | |
/* Append a set-FID action; returns 0 or a negative errno. */
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
}
| 423 | |
/* Create a rule within a ruleset, keyed by the caller's cookie. Takes
 * a ruleset reference, allocates rule info and a flow counter. Errors
 * unwind in reverse order; returns ERR_PTR() on failure.
 */
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Hold the ruleset for the rule's whole lifetime. */
	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
	if (err)
		goto err_counter_alloc;
	return rule;

err_counter_alloc:
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}
| 461 | |
/* Free a rule created by mlxsw_sp_acl_rule_create(), releasing the
 * counter, rule info and the ruleset reference in reverse order.
 */
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
| 472 | |
/* Install a rule: program it into hardware via the profile ops, insert
 * it into the ruleset's cookie hashtable and add it to the global list
 * used by activity polling. Rolls back hardware state on HT failure.
 */
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	return 0;

err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}
| 496 | |
/* Uninstall a rule in reverse order of mlxsw_sp_acl_rule_add():
 * list removal, hashtable removal, then hardware removal.
 */
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	list_del(&rule->list);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}
| 508 | |
/* Find an installed rule in a ruleset by its cookie; NULL if absent. */
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}
| 517 | |
/* Accessor for the rule's rule-info structure. */
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}
| 523 | |
Arkadi Sharshevsky | 446a154 | 2017-03-11 09:42:56 +0100 | [diff] [blame] | 524 | static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp, |
| 525 | struct mlxsw_sp_acl_rule *rule) |
| 526 | { |
| 527 | struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; |
| 528 | const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; |
| 529 | bool active; |
| 530 | int err; |
| 531 | |
| 532 | err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active); |
| 533 | if (err) |
| 534 | return err; |
| 535 | if (active) |
| 536 | rule->last_used = jiffies; |
| 537 | return 0; |
| 538 | } |
| 539 | |
/* Refresh the activity timestamp of every installed rule. The rule
 * list is mutated under RTNL, so the walk is done with RTNL held.
 * Stops and returns the first error encountered.
 */
static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	rtnl_unlock();
	return 0;

err_rule_update:
	rtnl_unlock();
	return err;
}
| 560 | |
/* Re-arm the activity-update delayed work for the configured interval. */
static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}
| 568 | |
| 569 | static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work) |
| 570 | { |
| 571 | struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl, |
| 572 | rule_activity_update.dw.work); |
| 573 | int err; |
| 574 | |
| 575 | err = mlxsw_sp_acl_rules_activity_update(acl); |
| 576 | if (err) |
| 577 | dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity"); |
| 578 | |
| 579 | mlxsw_sp_acl_rule_activity_work_schedule(acl); |
| 580 | } |
| 581 | |
Arkadi Sharshevsky | 7c1b8eb | 2017-03-11 09:42:59 +0100 | [diff] [blame] | 582 | int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, |
| 583 | struct mlxsw_sp_acl_rule *rule, |
| 584 | u64 *packets, u64 *bytes, u64 *last_use) |
| 585 | |
| 586 | { |
| 587 | struct mlxsw_sp_acl_rule_info *rulei; |
| 588 | u64 current_packets; |
| 589 | u64 current_bytes; |
| 590 | int err; |
| 591 | |
| 592 | rulei = mlxsw_sp_acl_rule_rulei(rule); |
| 593 | err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index, |
| 594 | ¤t_packets, ¤t_bytes); |
| 595 | if (err) |
| 596 | return err; |
| 597 | |
| 598 | *packets = current_packets - rule->last_packets; |
| 599 | *bytes = current_bytes - rule->last_bytes; |
| 600 | *last_use = rule->last_used; |
| 601 | |
| 602 | rule->last_bytes = current_bytes; |
| 603 | rule->last_packets = current_packets; |
| 604 | |
| 605 | return 0; |
| 606 | } |
| 607 | |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 608 | #define MLXSW_SP_KDVL_ACT_EXT_SIZE 1 |
| 609 | |
| 610 | static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, |
| 611 | char *enc_actions, bool is_first) |
| 612 | { |
| 613 | struct mlxsw_sp *mlxsw_sp = priv; |
| 614 | char pefa_pl[MLXSW_REG_PEFA_LEN]; |
| 615 | u32 kvdl_index; |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 616 | int err; |
| 617 | |
| 618 | /* The first action set of a TCAM entry is stored directly in TCAM, |
| 619 | * not KVD linear area. |
| 620 | */ |
| 621 | if (is_first) |
| 622 | return 0; |
| 623 | |
Arkadi Sharshevsky | 1312444 | 2017-03-25 08:28:22 +0100 | [diff] [blame] | 624 | err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE, |
| 625 | &kvdl_index); |
| 626 | if (err) |
| 627 | return err; |
Jiri Pirko | 22a6776 | 2017-02-03 10:29:07 +0100 | [diff] [blame] | 628 | mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); |
| 629 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); |
| 630 | if (err) |
| 631 | goto err_pefa_write; |
| 632 | *p_kvdl_index = kvdl_index; |
| 633 | return 0; |
| 634 | |
| 635 | err_pefa_write: |
| 636 | mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); |
| 637 | return err; |
| 638 | } |
| 639 | |
/* afa callback: release a KVD-linear action set. The first set lives
 * in TCAM and has no KVD entry to free.
 */
static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
				      bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	if (is_first)
		return;
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
| 649 | |
/* afa callback: allocate one KVD-linear entry and program it as a
 * PBS forwarding record pointing at local_port; frees the entry if
 * the register write fails.
 */
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
					   u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char ppbs_pl[MLXSW_REG_PPBS_LEN];
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
	if (err)
		goto err_ppbs_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_ppbs_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}
| 672 | |
/* afa callback: release a forwarding-entry KVD-linear index. */
static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
| 679 | |
/* Callbacks handed to the flexible-actions core for KVD management. */
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
	.kvdl_set_add		= mlxsw_sp_act_kvdl_set_add,
	.kvdl_set_del		= mlxsw_sp_act_kvdl_set_del,
	.kvdl_fwd_entry_add	= mlxsw_sp_act_kvdl_fwd_entry_add,
	.kvdl_fwd_entry_del	= mlxsw_sp_act_kvdl_fwd_entry_del,
};
| 686 | |
/* Initialize the ACL subsystem: flex keys, flex actions, ruleset
 * hashtable, dummy FID, TCAM backend and the activity-polling work.
 * Errors unwind everything set up so far, in reverse order.
 */
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	int err;

	/* Backend-private area is appended to the acl allocation. */
	acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp_afk_blocks,
				    MLXSW_SP_AFK_BLOCKS_COUNT);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_ACTIONS_PER_SET),
				    &mlxsw_sp_act_afa_ops, mlxsw_sp);
	if (IS_ERR(acl->afa)) {
		err = PTR_ERR(acl->afa);
		goto err_afa_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	err = acl_ops->init(mlxsw_sp, acl->priv);
	if (err)
		goto err_acl_ops_init;

	acl->ops = acl_ops;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rul_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afa_destroy(acl->afa);
err_afa_create:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}
| 754 | |
/* Tear down the ACL subsystem in reverse order of mlxsw_sp_acl_init().
 * The delayed work is cancelled first so no poll runs during teardown;
 * all rules are expected to be gone by now (WARN otherwise).
 */
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	acl_ops->fini(mlxsw_sp, acl->priv);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afa_destroy(acl->afa);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}