/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

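/* Stats polling timer: snapshot the HW APP1 (BPF) counters and re-arm
 * itself for as long as the BPF offload is enabled.  A change in the
 * packet count refreshes the "last used" timestamp reported to TC.
 */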
void nfp_net_filter_stats_timer(unsigned long data)
{
        struct nfp_net *nn = (void *)data;
        struct nfp_net_bpf_priv *priv;
        struct nfp_stat_pair latest;

        priv = nn->app_priv;

        spin_lock_bh(&priv->rx_filter_lock);

        if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                mod_timer(&priv->rx_filter_stats_timer,
                          jiffies + NFP_NET_STAT_POLL_IVL);

        spin_unlock_bh(&priv->rx_filter_lock);

        latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
        latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);

        if (latest.pkts != priv->rx_filter.pkts)
                priv->rx_filter_change = jiffies;

        priv->rx_filter = latest;
}

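/* Latch the current HW counters as the baseline so that the next
 * stats query only reports traffic seen by the program being loaded.
 */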
static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
        struct nfp_net_bpf_priv *priv = nn->app_priv;

        priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
        priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
        priv->rx_filter_prev = priv->rx_filter;
        priv->rx_filter_change = jiffies;
}

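/* Report the delta since the previous query to TC.  The FW byte
 * counters include the Ethernet header, while software TC stats are
 * taken after the MAC header has been pulled, hence the ETH_HLEN
 * adjustment below.
 */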
static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        struct nfp_net_bpf_priv *priv = nn->app_priv;
        u64 bytes, pkts;

        pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts;
        bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes;
        bytes -= pkts * ETH_HLEN;

        priv->rx_filter_prev = priv->rx_filter;

        tcf_exts_stats_update(cls_bpf->exts,
                              bytes, pkts, priv->rx_filter_change);

        return 0;
}

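/* Classify the offload request into one of the supported action
 * types: XDP, TC direct-action, or a legacy TC filter with a single
 * drop or redirect-to-self action.  Anything else cannot be offloaded.
 */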
static int
nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (!cls_bpf->exts)
                return NN_ACT_XDP;

        /* TC direct action */
        if (cls_bpf->exts_integrated) {
                if (!tcf_exts_has_actions(cls_bpf->exts))
                        return NN_ACT_DIRECT;

                return -EOPNOTSUPP;
        }

        /* TC legacy mode */
        if (!tcf_exts_has_one_action(cls_bpf->exts))
                return -EOPNOTSUPP;

        tcf_exts_to_list(cls_bpf->exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a))
                        return NN_ACT_TC_DROP;

                if (is_tcf_mirred_egress_redirect(a) &&
                    tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
                        return NN_ACT_TC_REDIR;
        }

        return -EOPNOTSUPP;
}

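/* JIT the program into a freshly allocated DMA-coherent buffer.  On
 * success the caller owns *code and must either load it into the FW
 * or free it with dma_free_coherent().
 */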
static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn,
                            struct tc_cls_bpf_offload *cls_bpf,
                            struct nfp_bpf_result *res,
                            void **code, dma_addr_t *dma_addr, u16 max_instr)
{
        unsigned int code_sz = max_instr * sizeof(u64);
        enum nfp_bpf_action_type act;
        u16 start_off, done_off;
        unsigned int max_mtu;
        int ret;

        if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
                return -EOPNOTSUPP;

        ret = nfp_net_bpf_get_act(nn, cls_bpf);
        if (ret < 0)
                return ret;
        act = ret;

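        /* BPF_INL_MTU is expressed in 64 byte chunks; the 32 bytes
         * subtracted are presumably reserved for the packet prepend.
         */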
        max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        if (max_mtu < nn->dp.netdev->mtu) {
                nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
                return -EOPNOTSUPP;
        }

        start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
        done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

        *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
        if (!*code)
                return -ENOMEM;

        ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
                          max_instr, res);
        if (ret)
                goto out;

        return 0;

out:
        dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
        return ret;
}

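/* Point the FW at the JITed code, load it, and enable the BPF data
 * path.  The DMA buffer is only needed for the load itself and is
 * freed once the reconfig completes, before the stats timer is armed.
 */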
static void
nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
                           void *code, dma_addr_t dma_addr,
                           unsigned int code_sz, unsigned int n_instr,
                           bool dense_mode)
{
        struct nfp_net_bpf_priv *priv = nn->app_priv;
        u64 bpf_addr = dma_addr;
        int err;

        nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);

        if (dense_mode)
                bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;

        nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
        nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                nn_err(nn, "FW command error while loading BPF: %d\n", err);

        /* Enable passing packets through BPF function */
        nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                nn_err(nn, "FW command error while enabling BPF: %d\n", err);

        dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);

        nfp_net_bpf_stats_reset(nn);
        mod_timer(&priv->rx_filter_stats_timer,
                  jiffies + NFP_NET_STAT_POLL_IVL);
}

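/* Disable the BPF data path and stop the stats timer.  The ctrl bit
 * is cleared under rx_filter_lock so the timer callback cannot
 * re-arm itself once del_timer_sync() returns.
 */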
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
        struct nfp_net_bpf_priv *priv = nn->app_priv;

        if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;

        spin_lock_bh(&priv->rx_filter_lock);
        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        spin_unlock_bh(&priv->rx_filter_lock);
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

        del_timer_sync(&priv->rx_filter_stats_timer);
        nn->dp.bpf_offload_skip_sw = 0;

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

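/* Entry point for cls_bpf offload commands coming from the stack. */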
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        struct nfp_bpf_result res;
        dma_addr_t dma_addr;
        u16 max_instr;
        void *code;
        int err;

        max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);

        switch (cls_bpf->command) {
        case TC_CLSBPF_REPLACE:
                /* There is nothing stopping us from implementing seamless
                 * replace but the simple method of loading I adopted in
                 * the firmware does not handle atomic replace (i.e. we have to
                 * stop the BPF offload and re-enable it).  Letting a few
                 * frames through without BPF applied in hardware should
                 * be fine if software fallback is available, though.
                 */
                if (nn->dp.bpf_offload_skip_sw)
                        return -EBUSY;

                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
                                                  &dma_addr, max_instr);
                if (err)
                        return err;

                nfp_net_bpf_stop(nn);
                nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
                                           dma_addr, max_instr * sizeof(u64),
                                           res.n_instr, res.dense_mode);
                return 0;

        case TC_CLSBPF_ADD:
                if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                        return -EBUSY;

                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
                                                  &dma_addr, max_instr);
                if (err)
                        return err;

                nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
                                           dma_addr, max_instr * sizeof(u64),
                                           res.n_instr, res.dense_mode);
                return 0;

        case TC_CLSBPF_DESTROY:
                return nfp_net_bpf_stop(nn);

        case TC_CLSBPF_STATS:
                return nfp_net_bpf_stats_update(nn, cls_bpf);

        default:
                return -EOPNOTSUPP;
        }
}