/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the complete
 * terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

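/* nfp_prog_prepare() - build per-instruction metadata for @nfp_prog.
 * The first pass allocates one struct nfp_insn_meta per BPF instruction;
 * a second pass resolves jump targets, linking each jump to its destination
 * meta and flagging the destination, so later passes can reason about
 * control flow without re-walking the list.  For example, a conditional
 * jump with off == 3 at instruction n targets the meta of instruction
 * n + 4 (n + 1 + off).
 */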
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		u64 code = meta->insn.code;

		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
		    BPF_OP(code) != BPF_CALL) {
			struct nfp_insn_meta *dst_meta;
			unsigned short dst_indx;

			dst_indx = meta->n + 1 + meta->insn.off;
			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
						     cnt);

			meta->jmp_dst = dst_meta;
			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		}
	}

	return 0;
}

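/* nfp_prog_free() - free all instruction metadata and the nfp_prog itself. */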
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

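/* Verifier preparation callback for a device-bound program: allocate the
 * driver-private nfp_prog, attach it to the program's offload state and
 * point the kernel verifier at nfp_bpf_analyzer_ops so per-instruction
 * offload checks run alongside normal verification.
 */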
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
			  struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

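/* Translate (JIT) the verified program into NFP code.  The FW advertises
 * its stack size in 64 byte units and its program limit in instructions;
 * e.g. a NFP_NET_CFG_BPF_STACK_SZ reading of 4 corresponds to a 256 B
 * stack, so a program with a larger stack_depth is rejected here with
 * -EOPNOTSUPP.  The output buffer is sized for the FW's maximum program
 * length.
 */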
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
		      struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int stack_size;
	unsigned int max_instr;

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}

	nfp_prog->stack_depth = prog->aux->stack_depth;
	nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	return nfp_bpf_jit(nfp_prog);
}

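/* Release the JITed image and all per-instruction metadata of a program
 * that is being destroyed.
 */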
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kfree(nfp_prog->prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

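/* DMA-map the JITed image, point the FW at it through the control BAR and
 * trigger an NFP_NET_CFG_UPDATE_BPF reconfig so the FW pulls the code in.
 * The mapping is only needed for the duration of the reconfig.  Offload is
 * refused when the netdev MTU exceeds the FW's packet split boundary
 * (derived from NFP_NET_CFG_BPF_INL_MTU).
 */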
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
		return -EOPNOTSUPP;
	}

	dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr))
		return -ENOMEM;

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		nn_err(nn, "FW command error while loading BPF: %d\n", err);

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);

	return err;
}

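/* Flip the BPF bit in the datapath control word so traffic starts flowing
 * through the loaded program.
 */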
static void nfp_net_bpf_start(struct nfp_net *nn)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "FW command error while enabling BPF: %d\n", err);
}

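/* Counterpart of nfp_net_bpf_start(): clear the BPF control bit, if it was
 * set, and sync the change to the FW.
 */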
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

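/* Install, replace or remove the offloaded program on a vNIC.  Replacing a
 * live program additionally requires the NFP_NET_BPF_CAP_RELO FW capability.
 *
 * A rough usage sketch (illustrative only, not a real call site):
 *
 *	nfp_net_bpf_offload(nn, prog, false);	// install a new program
 *	nfp_net_bpf_offload(nn, prog, true);	// live-replace the old one
 *	nfp_net_bpf_offload(nn, NULL, true);	// remove the current program
 */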
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog)
{
	int err;

	if (prog) {
		struct bpf_dev_offload *offload = prog->aux->offload;

		if (!offload)
			return -EINVAL;
		if (offload->netdev != nn->dp.netdev)
			return -EINVAL;
	}

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			nn_err(nn, "FW does not support live reload\n");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn);

	return 0;
}