/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

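/* Copy the program's instructions into per-instruction metadata entries,
 * then run a second pass linking each jump instruction to the metadata
 * of its destination, so later passes can follow jumps directly.
 */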
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		u64 code = meta->insn.code;

		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
		    BPF_OP(code) != BPF_CALL) {
			struct nfp_insn_meta *dst_meta;
			unsigned short dst_indx;

			dst_indx = meta->n + 1 + meta->insn.off;
			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
						     cnt);

			meta->jmp_dst = dst_meta;
			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		}
	}

	return 0;
}

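/* Free the per-instruction metadata list along with the program wrapper
 * itself.
 */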
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

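/* Allocate the nfp_prog translation state, attach it to the program's
 * offload private data and point the verifier at the NFP callbacks, so
 * device-specific checks run as part of kernel verification.
 */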
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
			  struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = app->priv;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

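/* Check the program against the limits the FW advertises (stack size in
 * 64 byte units, maximum instruction count) and JIT it into a freshly
 * allocated NFP instruction buffer.
 */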
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
		      struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int stack_size;
	unsigned int max_instr;

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}

	nfp_prog->stack_depth = prog->aux->stack_depth;
	nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	return nfp_bpf_jit(nfp_prog);
}

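/* Release the JITed image and all translation state kept for @prog. */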
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kfree(nfp_prog->prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

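/* DMA-map the JITed image, hand its address and length to the FW and
 * issue the reconfig command which makes the FW load the code.  The
 * mapping is only needed for the duration of that command.
 */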
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
		return -EOPNOTSUPP;
	}

	dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr))
		return -ENOMEM;

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		nn_err(nn, "FW command error while loading BPF: %d\n", err);

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);

	return err;
}

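/* nfp_net_bpf_start()/nfp_net_bpf_stop() flip the BPF bit in the
 * datapath control word and kick the FW so it starts or stops passing
 * packets through the loaded program.
 */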
static void nfp_net_bpf_start(struct nfp_net *nn)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "FW command error while enabling BPF: %d\n", err);
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

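/* Main entry point for installing, replacing or removing the offloaded
 * program.  @prog is the program to load (NULL to unload), @old_prog
 * says whether one is already loaded; live replacement additionally
 * requires the FW to advertise the reload capability.
 */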
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog)
{
	int err;

	if (prog) {
		struct bpf_dev_offload *offload = prog->aux->offload;

		if (!offload)
			return -EINVAL;
		if (offload->netdev != nn->dp.netdev)
			return -EINVAL;
	}

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			nn_err(nn, "FW does not support live reload\n");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn);

	return 0;
}