/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

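/* nfp_prog_prepare() - build the driver's per-instruction metadata list.
 * Each BPF instruction gets an nfp_insn_meta node on nfp_prog->insns;
 * a second pass resolves every jump (other than BPF_EXIT and BPF_CALL)
 * to the metadata of its destination instruction, so later fixup
 * passes need not recompute offsets.
 */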
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		u64 code = meta->insn.code;

		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
		    BPF_OP(code) != BPF_CALL) {
			struct nfp_insn_meta *dst_meta;
			unsigned short dst_indx;

			dst_indx = meta->n + 1 + meta->insn.off;
			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
						     cnt);

			meta->jmp_dst = dst_meta;
		}
	}

	return 0;
}

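/* nfp_prog_free() - tear down everything nfp_prog_prepare() built:
 * unlink and free each instruction's metadata, then the program state
 * itself.
 */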
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

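/* nfp_bpf_verifier_prep() - called before kernel verification starts.
 * Allocates the driver-private nfp_prog, copies the instructions into
 * metadata form, and points the verifier at the driver's analyzer ops
 * so nfp_prog->verifier_meta can follow verification instruction by
 * instruction.
 */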
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
			  struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

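/* nfp_bpf_translate() - JIT the verified program into NFP instructions.
 * FW limits come from the config BAR: NFP_NET_CFG_BPF_STACK_SZ reports
 * the available stack in 64-byte units (hence the "* 64"), and
 * NFP_NET_CFG_BPF_MAX_LEN bounds the instruction count the JIT output
 * buffer is sized for.  Programs needing more stack than the FW
 * provides are rejected up front.
 */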
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
		      struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int stack_size;
	unsigned int max_instr;

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}

	nfp_prog->stack_depth = prog->aux->stack_depth;
	nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	return nfp_bpf_jit(nfp_prog);
}

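/* nfp_bpf_destroy() - release the JITed image and all translation state
 * once the kernel drops its last reference to the offloaded program.
 */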
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kfree(nfp_prog->prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

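/* The three exported callbacks above form the driver's half of the
 * kernel BPF offload API.  As an illustrative sketch only (the actual
 * wiring lives elsewhere in the nfp app code and may differ), they
 * would be plugged into the app ops along these lines:
 *
 *	.bpf_verifier_prep	= nfp_bpf_verifier_prep,
 *	.bpf_translate		= nfp_bpf_translate,
 *	.bpf_destroy		= nfp_bpf_destroy,
 */

/* nfp_net_bpf_load() - hand the JITed image to the FW.  The image is
 * DMA-mapped only for the duration of the NFP_NET_CFG_UPDATE_BPF
 * reconfig; the FW copies it out, so the mapping is torn down before
 * returning.
 */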
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
		return -EOPNOTSUPP;
	}

	dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr))
		return -ENOMEM;

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		nn_err(nn, "FW command error while loading BPF: %d\n", err);

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);

	return err;
}

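/* nfp_net_bpf_start() - flip the datapath over to the loaded program by
 * setting NFP_NET_CFG_CTRL_BPF and kicking a general reconfig.
 */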
static void nfp_net_bpf_start(struct nfp_net *nn)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "FW command error while enabling BPF: %d\n", err);
}

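/* nfp_net_bpf_stop() - the inverse of start; a no-op if BPF was never
 * enabled, so callers can invoke it unconditionally.
 */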
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

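/* nfp_net_bpf_offload() - top-level install/replace/remove entry point.
 * The (prog, old_prog) pair encodes the requested transition: install
 * a first program, replace a running one (only if the FW advertises
 * NFP_NET_BPF_CAP_RELO for live reload), or stop offload altogether.
 */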
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog)
{
	int err;

	if (prog) {
		struct bpf_dev_offload *offload = prog->aux->offload;

		if (!offload)
			return -EINVAL;
		if (offload->netdev != nn->dp.netdev)
			return -EINVAL;
	}

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			nn_err(nn, "FW does not support live reload\n");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn);

	return 0;
}
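
/* A hedged usage sketch (the real call site lives in the nfp app code
 * and its exact shape may differ): a setup path would compute whether a
 * program is already running and drive the transition accordingly, e.g.:
 *
 *	bool running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
 *
 *	err = nfp_net_bpf_offload(nn, new_prog, running);
 */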