/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

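/* Periodic callback polling the hardware filter counters.  Re-arms itself
 * for as long as BPF offload is enabled, snapshots the APP1 frame/byte
 * counters and records the time of the last packet count change (reported
 * to TC as lastuse).
 */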
void nfp_net_filter_stats_timer(unsigned long data)
{
	struct nfp_net *nn = (void *)data;
	struct nfp_net_bpf_priv *priv;
	struct nfp_stat_pair latest;

	priv = nn->app_priv;

	spin_lock_bh(&priv->rx_filter_lock);

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		mod_timer(&priv->rx_filter_stats_timer,
			  jiffies + NFP_NET_STAT_POLL_IVL);

	spin_unlock_bh(&priv->rx_filter_lock);

	latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
	latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);

	if (latest.pkts != priv->rx_filter.pkts)
		priv->rx_filter_change = jiffies;

	priv->rx_filter = latest;
}

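/* Re-baseline the filter statistics when a program is (re)loaded so that
 * the deltas reported to TC start from zero.
 */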
static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
	struct nfp_net_bpf_priv *priv = nn->app_priv;

	priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
	priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
	priv->rx_filter_prev = priv->rx_filter;
	priv->rx_filter_change = jiffies;
}

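/* Report the packet/byte delta since the previous query to TC.  ETH_HLEN is
 * subtracted per packet to account for the Ethernet header, which the
 * hardware byte counters include but ingress software stats do not.
 */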
static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
	struct nfp_net_bpf_priv *priv = nn->app_priv;
	u64 bytes, pkts;

	pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts;
	bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes;
	bytes -= pkts * ETH_HLEN;

	priv->rx_filter_prev = priv->rx_filter;

	tcf_exts_stats_update(cls_bpf->exts,
			      bytes, pkts, priv->rx_filter_change);

	return 0;
}

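/* Map the offload request onto an NFP action type: XDP when there are no TC
 * extensions, TC direct-action when the program is integrated and carries no
 * separate actions, otherwise legacy TC mode with exactly one supported
 * action (drop, or redirect back to the same port).
 */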
Jakub Kicinski7533fdc2016-09-21 11:44:01 +0100107static int
108nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
109{
110 const struct tc_action *a;
111 LIST_HEAD(actions);
112
Jakub Kicinski6d677072016-11-03 17:12:09 +0000113 if (!cls_bpf->exts)
114 return NN_ACT_XDP;
115
Jakub Kicinski7533fdc2016-09-21 11:44:01 +0100116 /* TC direct action */
Jakub Kicinskie3b8baf2016-09-21 11:44:07 +0100117 if (cls_bpf->exts_integrated) {
Jiri Pirko3bcc0ce2017-08-04 14:28:58 +0200118 if (!tcf_exts_has_actions(cls_bpf->exts))
Jakub Kicinskie3b8baf2016-09-21 11:44:07 +0100119 return NN_ACT_DIRECT;
120
Jakub Kicinski46c50512017-04-27 21:06:15 -0700121 return -EOPNOTSUPP;
Jakub Kicinskie3b8baf2016-09-21 11:44:07 +0100122 }
Jakub Kicinski7533fdc2016-09-21 11:44:01 +0100123
124 /* TC legacy mode */
Jiri Pirko3bcc0ce2017-08-04 14:28:58 +0200125 if (!tcf_exts_has_one_action(cls_bpf->exts))
Jakub Kicinski46c50512017-04-27 21:06:15 -0700126 return -EOPNOTSUPP;
Jakub Kicinski7533fdc2016-09-21 11:44:01 +0100127
128 tcf_exts_to_list(cls_bpf->exts, &actions);
129 list_for_each_entry(a, &actions, list) {
130 if (is_tcf_gact_shot(a))
131 return NN_ACT_TC_DROP;
Jakub Kicinski2d184212016-09-21 11:44:06 +0100132
Shmulik Ladkani5724b8b2016-10-13 09:06:43 +0300133 if (is_tcf_mirred_egress_redirect(a) &&
Jakub Kicinski79c12a72017-03-10 10:38:27 -0800134 tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
Jakub Kicinski2d184212016-09-21 11:44:06 +0100135 return NN_ACT_TC_REDIR;
Jakub Kicinski7533fdc2016-09-21 11:44:01 +0100136 }
137
Jakub Kicinski46c50512017-04-27 21:06:15 -0700138 return -EOPNOTSUPP;
Jakub Kicinski7533fdc2016-09-21 11:44:01 +0100139}
140
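/* Validate the offload request and JIT-translate the BPF program into NFP
 * instructions placed in a DMA-coherent buffer the firmware can fetch.
 * On success the caller owns the buffer returned in *code / *dma_addr.
 */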
static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn,
			    struct tc_cls_bpf_offload *cls_bpf,
			    struct nfp_bpf_result *res,
			    void **code, dma_addr_t *dma_addr, u16 max_instr)
{
	unsigned int code_sz = max_instr * sizeof(u64);
	enum nfp_bpf_action_type act;
	u16 start_off, done_off;
	unsigned int max_mtu;
	int ret;

	if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
		return -EOPNOTSUPP;

	ret = nfp_net_bpf_get_act(nn, cls_bpf);
	if (ret < 0)
		return ret;
	act = ret;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
		return -EOPNOTSUPP;
	}

	start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
	if (!*code)
		return -ENOMEM;

	ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
			  max_instr, res);
	if (ret)
		goto out;

	return 0;

out:
	dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
	return ret;
}

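/* Hand the translated program to the firmware, enable the BPF data path and
 * start statistics polling.  The DMA buffer is freed once the firmware has
 * been told to load the code.
 */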
static void
nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
			   void *code, dma_addr_t dma_addr,
			   unsigned int code_sz, unsigned int n_instr,
			   bool dense_mode)
{
	struct nfp_net_bpf_priv *priv = nn->app_priv;
	u64 bpf_addr = dma_addr;
	int err;

	nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);

	if (dense_mode)
		bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		nn_err(nn, "FW command error while loading BPF: %d\n", err);

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "FW command error while enabling BPF: %d\n", err);

	dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);

	nfp_net_bpf_stats_reset(nn);
	mod_timer(&priv->rx_filter_stats_timer,
		  jiffies + NFP_NET_STAT_POLL_IVL);
}

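/* Disable the BPF data path and stop statistics polling.  Safe to call when
 * no program is currently offloaded.
 */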
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	struct nfp_net_bpf_priv *priv = nn->app_priv;

	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	spin_lock_bh(&priv->rx_filter_lock);
	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	spin_unlock_bh(&priv->rx_filter_lock);
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	del_timer_sync(&priv->rx_filter_stats_timer);
	nn->dp.bpf_offload_skip_sw = 0;

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

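/* Entry point for TC cls_bpf offload requests, dispatching on the command
 * (replace, add, destroy or stats query).
 */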
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
	struct nfp_bpf_result res;
	dma_addr_t dma_addr;
	u16 max_instr;
	void *code;
	int err;

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);

	switch (cls_bpf->command) {
	case TC_CLSBPF_REPLACE:
		/* There is nothing stopping us from implementing seamless
		 * replace but the simple method of loading I adopted in
		 * the firmware does not handle atomic replace (i.e. we have to
		 * stop the BPF offload and re-enable it). Leaking-in a few
		 * frames which didn't have BPF applied in the hardware should
		 * be fine if software fallback is available, though.
		 */
		if (nn->dp.bpf_offload_skip_sw)
			return -EBUSY;

		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
						  &dma_addr, max_instr);
		if (err)
			return err;

		nfp_net_bpf_stop(nn);
		nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
					   dma_addr, max_instr * sizeof(u64),
					   res.n_instr, res.dense_mode);
		return 0;

	case TC_CLSBPF_ADD:
		if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
			return -EBUSY;

		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
						  &dma_addr, max_instr);
		if (err)
			return err;

		nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
					   dma_addr, max_instr * sizeof(u64),
					   res.n_instr, res.dense_mode);
		return 0;

	case TC_CLSBPF_DESTROY:
		return nfp_net_bpf_stop(nn);

	case TC_CLSBPF_STATS:
		return nfp_net_bpf_stats_update(nn, cls_bpf);

	default:
		return -EOPNOTSUPP;
	}
}