/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

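/* Build per-instruction metadata for the program being offloaded: allocate
 * one struct nfp_insn_meta per eBPF instruction and link it on
 * nfp_prog->insns, then walk the list a second time to resolve the
 * destination meta of every jump (BPF_EXIT and BPF_CALL are skipped).
 */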
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		u64 code = meta->insn.code;

		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
		    BPF_OP(code) != BPF_CALL) {
			struct nfp_insn_meta *dst_meta;
			unsigned short dst_indx;

			dst_indx = meta->n + 1 + meta->insn.off;
			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
						     cnt);

			meta->jmp_dst = dst_meta;
		}
	}

	return 0;
}

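/* Free all per-instruction metadata along with the program context itself. */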
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

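/* Prepare a program for offload before kernel verification: allocate the
 * nfp_prog context, build the instruction metadata list and point the
 * verifier at the offload-specific callbacks via bpf->verifier.ops.
 */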
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
			  struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

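/* Translate a verified program into NFP machine code.  The FW advertises
 * its stack limit in NFP_NET_CFG_BPF_STACK_SZ (in units of 64 bytes) and
 * its maximum code length in NFP_NET_CFG_BPF_MAX_LEN; the actual code
 * generation is done by nfp_bpf_jit().
 */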
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
		      struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int stack_size;
	unsigned int max_instr;

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}

	nfp_prog->stack_depth = prog->aux->stack_depth;
	nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	return nfp_bpf_jit(nfp_prog);
}

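/* Release the JITed image and the per-program offload state. */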
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kfree(nfp_prog->prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

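/* DMA-map the JITed code, tell the FW its address and length, and issue an
 * UPDATE_BPF reconfig so the FW picks it up.  The mapping is released again
 * before returning.  Offload is refused if the netdev MTU exceeds the HW
 * packet split boundary advertised in NFP_NET_CFG_BPF_INL_MTU.
 */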
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
		return -EOPNOTSUPP;
	}

	dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr))
		return -ENOMEM;

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		nn_err(nn, "FW command error while loading BPF: %d\n", err);

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);

	return err;
}

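/* Set the BPF control bit and reconfigure so traffic is passed through the
 * loaded program.
 */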
static void nfp_net_bpf_start(struct nfp_net *nn)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "FW command error while enabling BPF: %d\n", err);
}

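/* Clear the BPF control bit, if set, and reconfigure so the FW stops
 * running the program on traffic.
 */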
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

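/* Top-level offload entry point: check that @prog was set up for offload to
 * this netdev, require the FW live-reload capability when replacing a
 * running program, then load, start or stop the datapath program as needed.
 */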
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog)
{
	int err;

	if (prog) {
		struct bpf_dev_offload *offload = prog->aux->offload;

		if (!offload)
			return -EINVAL;
		if (offload->netdev != nn->dp.netdev)
			return -EINVAL;
	}

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			nn_err(nn, "FW does not support live reload\n");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn);

	return 0;
}