/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

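/* Build the per-instruction metadata list for the program.  The first
 * pass allocates an nfp_insn_meta for every eBPF instruction; the
 * second pass links each jump (any BPF_JMP other than BPF_EXIT and
 * BPF_CALL) to the metadata of its destination and marks that
 * destination with FLAG_INSN_IS_JUMP_DST for use by later passes.
 */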
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
                 unsigned int cnt)
{
        struct nfp_insn_meta *meta;
        unsigned int i;

        for (i = 0; i < cnt; i++) {
                meta = kzalloc(sizeof(*meta), GFP_KERNEL);
                if (!meta)
                        return -ENOMEM;

                meta->insn = prog[i];
                meta->n = i;

                list_add_tail(&meta->l, &nfp_prog->insns);
        }

        /* Another pass to record jump information. */
        list_for_each_entry(meta, &nfp_prog->insns, l) {
                u64 code = meta->insn.code;

                if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
                    BPF_OP(code) != BPF_CALL) {
                        struct nfp_insn_meta *dst_meta;
                        unsigned short dst_indx;

                        dst_indx = meta->n + 1 + meta->insn.off;
                        dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
                                                     cnt);

                        meta->jmp_dst = dst_meta;
                        dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
                }
        }

        return 0;
}

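/* Undo nfp_prog_prepare(): free every instruction's metadata and the
 * nfp_prog itself.
 */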
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta, *tmp;

        list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
                list_del(&meta->l);
                kfree(meta);
        }
        kfree(nfp_prog);
}

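/* First stage of offload: allocate the driver-private nfp_prog, attach
 * it to the program's offload state, build the instruction metadata
 * list, and install nfp_bpf_analyzer_ops so the kernel verifier can
 * call back into the driver as it walks the instructions.
 */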
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
                          struct netdev_bpf *bpf)
{
        struct bpf_prog *prog = bpf->verifier.prog;
        struct nfp_prog *nfp_prog;
        int ret;

        nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
        if (!nfp_prog)
                return -ENOMEM;
        prog->aux->offload->dev_priv = nfp_prog;

        INIT_LIST_HEAD(&nfp_prog->insns);
        nfp_prog->type = prog->type;

        ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
        if (ret)
                goto err_free;

        nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
        bpf->verifier.ops = &nfp_bpf_analyzer_ops;

        return 0;

err_free:
        nfp_prog_free(nfp_prog);

        return ret;
}

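/* Second stage of offload: check the program's stack usage against the
 * limit the firmware advertises (NFP_NET_CFG_BPF_STACK_SZ, in units of
 * 64 bytes), record the firmware's code offsets, size the output buffer
 * from NFP_NET_CFG_BPF_MAX_LEN, and JIT the program into NFP
 * instructions.
 */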
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
                      struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int stack_size;
        unsigned int max_instr;

        stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
        if (prog->aux->stack_depth > stack_size) {
                nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
                        prog->aux->stack_depth, stack_size);
                return -EOPNOTSUPP;
        }

        nfp_prog->stack_depth = prog->aux->stack_depth;
        nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
        nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

        max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
        nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

        nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
        if (!nfp_prog->prog)
                return -ENOMEM;

        return nfp_bpf_jit(nfp_prog);
}

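/* Final stage of offload: free the JITed image and all the metadata
 * built in nfp_bpf_verifier_prep().
 */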
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
                    struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

        kfree(nfp_prog->prog);
        nfp_prog_free(nfp_prog);

        return 0;
}

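/* Push the JITed image to the firmware.  The image is DMA-mapped only
 * for the duration of the NFP_NET_CFG_UPDATE_BPF reconfig that makes
 * the firmware read it.  Offload is refused if the netdev MTU exceeds
 * the HW packet split boundary, computed from NFP_NET_CFG_BPF_INL_MTU
 * (in units of 64 bytes, less 32 bytes).
 */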
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
        struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
        unsigned int max_mtu;
        dma_addr_t dma_addr;
        int err;

        max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        if (max_mtu < nn->dp.netdev->mtu) {
                nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
                return -EOPNOTSUPP;
        }

        dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
                                  nfp_prog->prog_len * sizeof(u64),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(nn->dp.dev, dma_addr))
                return -ENOMEM;

        nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
        nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                nn_err(nn, "FW command error while loading BPF: %d\n", err);

        dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
                         DMA_TO_DEVICE);

        return err;
}

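/* Set the BPF bit in the control word and issue a general reconfig so
 * the firmware starts passing packets through the loaded program.
 */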
static void nfp_net_bpf_start(struct nfp_net *nn)
{
        int err;

        /* Enable passing packets through BPF function */
        nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                nn_err(nn, "FW command error while enabling BPF: %d\n", err);
}

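/* Counterpart to nfp_net_bpf_start(): clear the BPF control bit and
 * reconfig.  A no-op if no program is currently enabled.
 */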
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
        if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;

        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

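/* Entry point for enabling, replacing or removing an offloaded program.
 * @old_prog says whether a program is currently active: replacing a
 * program on the fly requires the firmware's NFP_NET_BPF_CAP_RELO
 * capability, and loading on top of unrelated state (e.g. a different
 * program type) is rejected.
 */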
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
                        bool old_prog)
{
        int err;

        if (prog) {
                struct bpf_dev_offload *offload = prog->aux->offload;

                if (!offload)
                        return -EINVAL;
                if (offload->netdev != nn->dp.netdev)
                        return -EINVAL;
        }

        if (prog && old_prog) {
                u8 cap;

                cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
                if (!(cap & NFP_NET_BPF_CAP_RELO)) {
                        nn_err(nn, "FW does not support live reload\n");
                        return -EBUSY;
                }
        }

        /* Something else is loaded, different program type? */
        if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                return -EBUSY;

        if (old_prog && !prog)
                return nfp_net_bpf_stop(nn);

        err = nfp_net_bpf_load(nn, prog);
        if (err)
                return err;

        if (!old_prog)
                nfp_net_bpf_start(nn);

        return 0;
}