/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

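/* Periodically pull the hardware filter counters from the BAR and re-arm the
 * timer for as long as the BPF offload stays enabled.
 */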
void nfp_net_filter_stats_timer(unsigned long data)
{
        struct nfp_net *nn = (void *)data;
        struct nfp_stat_pair latest;

        spin_lock_bh(&nn->rx_filter_lock);

        if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                mod_timer(&nn->rx_filter_stats_timer,
                          jiffies + NFP_NET_STAT_POLL_IVL);

        spin_unlock_bh(&nn->rx_filter_lock);

        latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
        latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);

        if (latest.pkts != nn->rx_filter.pkts)
                nn->rx_filter_change = jiffies;

        nn->rx_filter = latest;
}

static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
        nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
        nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
        nn->rx_filter_prev = nn->rx_filter;
        nn->rx_filter_change = jiffies;
}

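/* Report the delta since the previous update to TC.  ETH_HLEN is subtracted
 * per frame because the firmware byte counter appears to include the Ethernet
 * header, which TC stats do not.
 */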
static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        u64 bytes, pkts;

        pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
        bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
        bytes -= pkts * ETH_HLEN;

        nn->rx_filter_prev = nn->rx_filter;

        tcf_exts_stats_update(cls_bpf->exts,
                              bytes, pkts, nn->rx_filter_change);

        return 0;
}

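/* Map the offload request onto an NFP action type: XDP when no TC extensions
 * are present, TC direct-action when the program itself provides the verdict,
 * or a single supported legacy action (gact drop, or mirred redirect back to
 * the same port).
 */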
static int
nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (!cls_bpf->exts)
                return NN_ACT_XDP;

        /* TC direct action */
        if (cls_bpf->exts_integrated) {
                if (tc_no_actions(cls_bpf->exts))
                        return NN_ACT_DIRECT;

                return -EOPNOTSUPP;
        }

        /* TC legacy mode */
        if (!tc_single_action(cls_bpf->exts))
                return -EOPNOTSUPP;

        tcf_exts_to_list(cls_bpf->exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a))
                        return NN_ACT_TC_DROP;

                if (is_tcf_mirred_egress_redirect(a) &&
                    tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
                        return NN_ACT_TC_REDIR;
        }

        return -EOPNOTSUPP;
}

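/* Validate the request, allocate a DMA-coherent buffer large enough for
 * max_instr instruction words and JIT the BPF program into it.  On success
 * the caller owns *code / *dma_addr.
 */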
static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn,
                            struct tc_cls_bpf_offload *cls_bpf,
                            struct nfp_bpf_result *res,
                            void **code, dma_addr_t *dma_addr, u16 max_instr)
{
        unsigned int code_sz = max_instr * sizeof(u64);
        enum nfp_bpf_action_type act;
        u16 start_off, done_off;
        unsigned int max_mtu;
        int ret;

        if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
                return -EOPNOTSUPP;

        ret = nfp_net_bpf_get_act(nn, cls_bpf);
        if (ret < 0)
                return ret;
        act = ret;

        max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        if (max_mtu < nn->dp.netdev->mtu) {
                nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
                return -EOPNOTSUPP;
        }

        start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
        done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

        *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
        if (!*code)
                return -ENOMEM;

        ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
                          max_instr, res);
        if (ret)
                goto out;

        return 0;

out:
        dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
        return ret;
}

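/* Point the firmware at the JITed image, ask it to load the program, then
 * flip the BPF ctrl bit and start the stats timer.  The DMA buffer is freed
 * after the reconfig commands have been issued.
 */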
static void
nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
                           void *code, dma_addr_t dma_addr,
                           unsigned int code_sz, unsigned int n_instr,
                           bool dense_mode)
{
        u64 bpf_addr = dma_addr;
        int err;

        nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);

        if (dense_mode)
                bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;

        nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
        nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                nn_err(nn, "FW command error while loading BPF: %d\n", err);

        /* Enable passing packets through BPF function */
        nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                nn_err(nn, "FW command error while enabling BPF: %d\n", err);

        dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);

        nfp_net_bpf_stats_reset(nn);
        mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
}

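/* Disable the BPF offload: clear the ctrl bit, stop the stats timer and let
 * the firmware pick up the new configuration.
 */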
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
        if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;

        spin_lock_bh(&nn->rx_filter_lock);
        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        spin_unlock_bh(&nn->rx_filter_lock);
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

        del_timer_sync(&nn->rx_filter_stats_timer);
        nn->dp.bpf_offload_skip_sw = 0;

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

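/* Main entry point for the BPF offload - dispatch on the requested command. */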
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        struct nfp_bpf_result res;
        dma_addr_t dma_addr;
        u16 max_instr;
        void *code;
        int err;

        max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);

        switch (cls_bpf->command) {
        case TC_CLSBPF_REPLACE:
                /* There is nothing stopping us from implementing seamless
                 * replace but the simple method of loading I adopted in
                 * the firmware does not handle atomic replace (i.e. we have to
                 * stop the BPF offload and re-enable it). Leaking-in a few
                 * frames which didn't have BPF applied in the hardware should
                 * be fine if software fallback is available, though.
                 */
                if (nn->dp.bpf_offload_skip_sw)
                        return -EBUSY;

                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
                                                  &dma_addr, max_instr);
                if (err)
                        return err;

                nfp_net_bpf_stop(nn);
                nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
                                           dma_addr, max_instr * sizeof(u64),
                                           res.n_instr, res.dense_mode);
                return 0;

        case TC_CLSBPF_ADD:
                if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                        return -EBUSY;

                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
                                                  &dma_addr, max_instr);
                if (err)
                        return err;

                nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
                                           dma_addr, max_instr * sizeof(u64),
                                           res.n_instr, res.dense_mode);
                return 0;

        case TC_CLSBPF_DESTROY:
                return nfp_net_bpf_stop(nn);

        case TC_CLSBPF_STATS:
                return nfp_net_bpf_stats_update(nn, cls_bpf);

        default:
                return -EOPNOTSUPP;
        }
}