/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the complete
 * terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

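/* Build the driver's per-instruction metadata: queue one nfp_insn_meta per
 * BPF instruction on nfp_prog->insns, then let nfp_bpf_jit_prepare()
 * pre-compute whatever whole-program state the JIT needs.
 */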
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	nfp_bpf_jit_prepare(nfp_prog, cnt);

	return 0;
}

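/* Free every queued nfp_insn_meta and the nfp_prog itself. */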
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

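/* BPF_OFFLOAD_VERIFIER_PREP: allocate the device-private nfp_prog, build its
 * instruction list and hand the kernel verifier our callbacks
 * (nfp_bpf_analyzer_ops) so instructions are checked against device
 * constraints as the verifier walks the program.
 */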
static int
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
		      struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = app->priv;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

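/* BPF_OFFLOAD_TRANSLATE: reject programs whose stack does not fit the
 * FW-advertised limit (reported in 64 byte units), size the output buffer
 * from the FW's maximum instruction count and JIT the program into it.
 */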
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int stack_size;
	unsigned int max_instr;

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}
	nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	return nfp_bpf_jit(nfp_prog);
}

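/* BPF_OFFLOAD_DESTROY: drop the JITed image and all per-instruction state. */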
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

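/* A NULL key from the BPF core means "give me the first entry"; otherwise
 * ask the FW for the entry following @key.
 */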
static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_del_entry(offmap, key);
}

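/* Map ops handed to the BPF core for offloaded maps; lookup and update go
 * straight to the control message helpers, the others need small wrappers.
 */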
static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_ctrl_lookup_entry,
	.map_update_elem	= nfp_bpf_ctrl_update_entry,
	.map_delete_elem	= nfp_bpf_map_delete_elem,
};

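/* BPF_OFFLOAD_MAP_ALLOC: check the request against the capabilities the FW
 * advertised (supported types, map and element budget, key/value sizes),
 * then ask the FW to allocate the map and record the id it hands back.
 */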
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	long long int res;

	if (!bpf->maps.types)
		return -EOPNOTSUPP;

	if (offmap->map.map_flags ||
	    offmap->map.numa_node != NUMA_NO_NODE) {
		pr_info("map flags are not supported\n");
		return -EINVAL;
	}

	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
		pr_info("map type not supported\n");
		return -EOPNOTSUPP;
	}
	if (bpf->maps.max_maps == bpf->maps_in_use) {
		pr_info("too many maps for a device\n");
		return -ENOMEM;
	}
	if (bpf->maps.max_elems - bpf->map_elems_in_use <
	    offmap->map.max_entries) {
		pr_info("map with too many elements: %u, left: %u\n",
			offmap->map.max_entries,
			bpf->maps.max_elems - bpf->map_elems_in_use);
		return -ENOMEM;
	}
	if (offmap->map.key_size > bpf->maps.max_key_sz ||
	    offmap->map.value_size > bpf->maps.max_val_sz ||
	    round_up(offmap->map.key_size, 8) +
	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
		pr_info("elements don't fit in device constraints\n");
		return -ENOMEM;
	}

	nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
	if (!nfp_map)
		return -ENOMEM;

	offmap->dev_priv = nfp_map;
	nfp_map->offmap = offmap;
	nfp_map->bpf = bpf;

	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
	if (res < 0) {
		kfree(nfp_map);
		return res;
	}

	nfp_map->tid = res;
	offmap->dev_ops = &nfp_bpf_map_ops;
	bpf->maps_in_use++;
	bpf->map_elems_in_use += offmap->map.max_entries;
	list_add_tail(&nfp_map->l, &bpf->map_list);

	return 0;
}

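/* BPF_OFFLOAD_MAP_FREE: free the map on the device and return its entries to
 * the per-device element budget.
 */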
static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;

	nfp_bpf_ctrl_free_map(bpf, nfp_map);
	list_del_init(&nfp_map->l);
	bpf->map_elems_in_use -= offmap->map.max_entries;
	bpf->maps_in_use--;
	kfree(nfp_map);

	return 0;
}

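/* Single entry point for the ndo_bpf offload commands; dispatch program
 * verification, translation and teardown as well as offloaded map
 * allocation and freeing to the helpers above.
 */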
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		return nfp_bpf_verifier_prep(app, nn, bpf);
	case BPF_OFFLOAD_TRANSLATE:
		return nfp_bpf_translate(nn, bpf->offload.prog);
	case BPF_OFFLOAD_DESTROY:
		return nfp_bpf_destroy(nn, bpf->offload.prog);
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		return nfp_bpf_map_free(app->priv, bpf->offmap);
	default:
		return -EINVAL;
	}
}

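/* Load the program into the FW: refuse if the netdev MTU exceeds what the FW
 * can pass through the BPF datapath, relocate the JITed image for this vNIC,
 * DMA-map it, write its size and address into the vNIC's BPF config
 * registers and trigger a reconfig.  The mapping and the relocated copy are
 * only needed for the duration of the reconfig.
 */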
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	void *img;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
		return -EOPNOTSUPP;
	}

	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
	if (IS_ERR(img))
		return PTR_ERR(img);

	dma_addr = dma_map_single(nn->dp.dev, img,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
		kfree(img);
		return -ENOMEM;
	}

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		nn_err(nn, "FW command error while loading BPF: %d\n", err);

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);
	kfree(img);

	return err;
}

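/* Flip the datapath into BPF mode so received packets are run through the
 * loaded program.
 */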
static void nfp_net_bpf_start(struct nfp_net *nn)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "FW command error while enabling BPF: %d\n", err);
}

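/* Disable the BPF datapath again; a no-op if it was never enabled. */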
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

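/* Attach (or detach, when @prog is NULL) an offloaded program to the vNIC:
 * validate that the program was set up for this netdev, check the FW
 * supports live reload when replacing an existing program, load the image
 * and start the BPF datapath if it is not running yet.
 */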
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog)
{
	int err;

	if (prog) {
		struct bpf_prog_offload *offload = prog->aux->offload;

		if (!offload)
			return -EINVAL;
		if (offload->netdev != nn->dp.netdev)
			return -EINVAL;
	}

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			nn_err(nn, "FW does not support live reload\n");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn);

	return 0;
}