/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
| 33 | |
Jakub Kicinski | bb45e51 | 2017-05-31 08:06:49 -0700 | [diff] [blame] | 34 | #include <net/pkt_cls.h> |
| 35 | |
Jakub Kicinski | 8aa0cb0 | 2017-05-31 08:06:46 -0700 | [diff] [blame] | 36 | #include "../nfpcore/nfp_cpp.h" |
Jakub Kicinski | 77a844e | 2017-12-14 21:29:16 -0800 | [diff] [blame] | 37 | #include "../nfpcore/nfp_nffw.h" |
Jakub Kicinski | e3ac6c0 | 2018-02-07 20:55:22 -0800 | [diff] [blame] | 38 | #include "../nfpcore/nfp_nsp.h" |
Jakub Kicinski | 8aa0cb0 | 2017-05-31 08:06:46 -0700 | [diff] [blame] | 39 | #include "../nfp_app.h" |
| 40 | #include "../nfp_main.h" |
| 41 | #include "../nfp_net.h" |
| 42 | #include "../nfp_port.h" |
Jakub Kicinski | 0d49eaf | 2017-12-14 21:29:18 -0800 | [diff] [blame] | 43 | #include "fw.h" |
Jakub Kicinski | bb45e51 | 2017-05-31 08:06:49 -0700 | [diff] [blame] | 44 | #include "main.h" |
| 45 | |
| 46 | static bool nfp_net_ebpf_capable(struct nfp_net *nn) |
| 47 | { |
Jakub Kicinski | 0f6cf4d | 2017-10-12 10:34:13 -0700 | [diff] [blame] | 48 | #ifdef __LITTLE_ENDIAN |
Jakub Kicinski | bb45e51 | 2017-05-31 08:06:49 -0700 | [diff] [blame] | 49 | if (nn->cap & NFP_NET_CFG_CTRL_BPF && |
| 50 | nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI) |
| 51 | return true; |
Jakub Kicinski | 0f6cf4d | 2017-10-12 10:34:13 -0700 | [diff] [blame] | 52 | #endif |
Jakub Kicinski | bb45e51 | 2017-05-31 08:06:49 -0700 | [diff] [blame] | 53 | return false; |
| 54 | } |
| 55 | |
/* Install, replace or remove an XDP program offloaded to the NFP.
 *
 * @app:    NFP app handle (passed through to the recursive detach call)
 * @nn:     vNIC to (un)load the program on
 * @prog:   new BPF program, or NULL to detach
 * @extack: netlink extended ack for error reporting
 *
 * Return: 0 on success, -EINVAL if the FW lacks eBPF support, -EBUSY if
 * the offload slot is held by a non-XDP (TC) program, or the error from
 * nfp_net_bpf_offload().
 */
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
	bool running, xdp_running;
	int ret;

	if (!nfp_net_ebpf_capable(nn))
		return -EINVAL;

	/* running: some BPF offload is active; xdp_running: it is ours */
	running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
	xdp_running = running && nn->dp.bpf_offload_xdp;

	/* Detach request with no XDP program loaded - nothing to do */
	if (!prog && !xdp_running)
		return 0;
	/* Offload slot is occupied by a TC program - refuse to steal it */
	if (prog && running && !xdp_running)
		return -EBUSY;

	ret = nfp_net_bpf_offload(nn, prog, running, extack);
	/* Stop offload if replace not possible */
	if (ret && prog)
		nfp_bpf_xdp_offload(app, nn, NULL, extack);

	/* Record whether an XDP program now owns the offload */
	nn->dp.bpf_offload_xdp = prog && !ret;
	return ret;
}
| 82 | |
/* Report the "BPF" capability string for this vNIC, empty if the FW
 * does not support eBPF offload.
 */
static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	if (!nfp_net_ebpf_capable(nn))
		return "";
	return "BPF";
}
Jakub Kicinski | 8aa0cb0 | 2017-05-31 08:06:46 -0700 | [diff] [blame] | 87 | |
Jakub Kicinski | 4f83435 | 2017-12-27 15:36:49 -0800 | [diff] [blame] | 88 | static int |
| 89 | nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) |
| 90 | { |
Jakub Kicinski | e3ac6c0 | 2018-02-07 20:55:22 -0800 | [diff] [blame] | 91 | struct nfp_pf *pf = app->pf; |
Jakub Kicinski | 2314fe9 | 2018-01-10 12:26:01 +0000 | [diff] [blame] | 92 | struct nfp_bpf_vnic *bv; |
Jakub Kicinski | 4f83435 | 2017-12-27 15:36:49 -0800 | [diff] [blame] | 93 | int err; |
| 94 | |
Jakub Kicinski | e3ac6c0 | 2018-02-07 20:55:22 -0800 | [diff] [blame] | 95 | if (!pf->eth_tbl) { |
| 96 | nfp_err(pf->cpp, "No ETH table\n"); |
| 97 | return -EINVAL; |
| 98 | } |
| 99 | if (pf->max_data_vnics != pf->eth_tbl->count) { |
| 100 | nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", |
| 101 | pf->max_data_vnics, pf->eth_tbl->count); |
| 102 | return -EINVAL; |
| 103 | } |
| 104 | |
Jakub Kicinski | 2314fe9 | 2018-01-10 12:26:01 +0000 | [diff] [blame] | 105 | bv = kzalloc(sizeof(*bv), GFP_KERNEL); |
| 106 | if (!bv) |
Jakub Kicinski | 4f83435 | 2017-12-27 15:36:49 -0800 | [diff] [blame] | 107 | return -ENOMEM; |
Jakub Kicinski | 2314fe9 | 2018-01-10 12:26:01 +0000 | [diff] [blame] | 108 | nn->app_priv = bv; |
Jakub Kicinski | 4f83435 | 2017-12-27 15:36:49 -0800 | [diff] [blame] | 109 | |
| 110 | err = nfp_app_nic_vnic_alloc(app, nn, id); |
| 111 | if (err) |
| 112 | goto err_free_priv; |
| 113 | |
Jakub Kicinski | 2314fe9 | 2018-01-10 12:26:01 +0000 | [diff] [blame] | 114 | bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); |
| 115 | bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE); |
| 116 | |
Jakub Kicinski | 4f83435 | 2017-12-27 15:36:49 -0800 | [diff] [blame] | 117 | return 0; |
| 118 | err_free_priv: |
| 119 | kfree(nn->app_priv); |
| 120 | return err; |
| 121 | } |
| 122 | |
/* Free the per-vNIC BPF state allocated in nfp_bpf_vnic_alloc().
 * By this point no TC program may still be offloaded on the vNIC.
 */
static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_bpf_vnic *bv = nn->app_priv;

	WARN_ON(bv->tc_prog);
	kfree(bv);
}
| 130 | |
/* TC block callback - offload a cls_bpf classifier to the NFP.
 *
 * Validates that the request is something the HW can handle (CLSBPF on
 * chain 0, ETH_P_ALL, direct-action with no legacy actions), reconciles
 * the stack's view of the old program with the driver's, and hands the
 * program to nfp_net_bpf_offload().
 *
 * Return: 0 on success or a -EOPNOTSUPP/offload error; extack carries
 * the human-readable reason.
 */
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct nfp_net *nn = cb_priv;
	struct bpf_prog *oldprog;
	struct nfp_bpf_vnic *bv;
	int err;

	if (type != TC_SETUP_CLSBPF) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}
	if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
		return -EOPNOTSUPP;
	if (!nfp_net_ebpf_capable(nn)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "NFP firmware does not support eBPF offload");
		return -EOPNOTSUPP;
	}
	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	/* Only support TC direct action */
	if (!cls_bpf->exts_integrated ||
	    tcf_exts_has_actions(cls_bpf->exts)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only direct action with no legacy actions supported");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	bv = nn->app_priv;
	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (bv->tc_prog != oldprog) {
		oldprog = NULL;
		/* Pure removal of a program we never had - nothing to do */
		if (!cls_bpf->prog)
			return 0;
	}

	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
				  cls_bpf->common.extack);
	if (err)
		return err;

	/* Track the offloaded program so future replace/remove requests
	 * can be matched against it, and keep the port's offload count
	 * (0 or 1 here) in sync.
	 */
	bv->tc_prog = cls_bpf->prog;
	nn->port->tc_offload_cnt = !!bv->tc_prog;
	return 0;
}
| 188 | |
| 189 | static int nfp_bpf_setup_tc_block(struct net_device *netdev, |
| 190 | struct tc_block_offload *f) |
| 191 | { |
| 192 | struct nfp_net *nn = netdev_priv(netdev); |
| 193 | |
| 194 | if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
| 195 | return -EOPNOTSUPP; |
| 196 | |
| 197 | switch (f->command) { |
| 198 | case TC_BLOCK_BIND: |
| 199 | return tcf_block_cb_register(f->block, |
| 200 | nfp_bpf_setup_tc_block_cb, |
| 201 | nn, nn); |
| 202 | case TC_BLOCK_UNBIND: |
| 203 | tcf_block_cb_unregister(f->block, |
| 204 | nfp_bpf_setup_tc_block_cb, |
| 205 | nn); |
| 206 | return 0; |
| 207 | default: |
| 208 | return -EOPNOTSUPP; |
| 209 | } |
| 210 | } |
| 211 | |
Jakub Kicinski | bb45e51 | 2017-05-31 08:06:49 -0700 | [diff] [blame] | 212 | static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, |
Jiri Pirko | de4784c | 2017-08-07 10:15:32 +0200 | [diff] [blame] | 213 | enum tc_setup_type type, void *type_data) |
Jakub Kicinski | bb45e51 | 2017-05-31 08:06:49 -0700 | [diff] [blame] | 214 | { |
Jiri Pirko | 90d9731 | 2017-10-19 15:50:44 +0200 | [diff] [blame] | 215 | switch (type) { |
Jiri Pirko | 90d9731 | 2017-10-19 15:50:44 +0200 | [diff] [blame] | 216 | case TC_SETUP_BLOCK: |
| 217 | return nfp_bpf_setup_tc_block(netdev, type_data); |
| 218 | default: |
Jakub Kicinski | bb45e51 | 2017-05-31 08:06:49 -0700 | [diff] [blame] | 219 | return -EOPNOTSUPP; |
Jiri Pirko | 90d9731 | 2017-10-19 15:50:44 +0200 | [diff] [blame] | 220 | } |
Jakub Kicinski | bb45e51 | 2017-05-31 08:06:49 -0700 | [diff] [blame] | 221 | } |
| 222 | |
Jakub Kicinski | 0d49eaf | 2017-12-14 21:29:18 -0800 | [diff] [blame] | 223 | static int |
John Hurley | 167cebe | 2018-03-28 18:50:06 -0700 | [diff] [blame] | 224 | nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) |
Jakub Kicinski | ccbdc59 | 2018-01-10 12:25:57 +0000 | [diff] [blame] | 225 | { |
| 226 | struct nfp_net *nn = netdev_priv(netdev); |
| 227 | unsigned int max_mtu; |
| 228 | |
| 229 | if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) |
| 230 | return 0; |
| 231 | |
| 232 | max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; |
| 233 | if (new_mtu > max_mtu) { |
| 234 | nn_info(nn, "BPF offload active, MTU over %u not supported\n", |
| 235 | max_mtu); |
| 236 | return -EBUSY; |
| 237 | } |
| 238 | return 0; |
| 239 | } |
| 240 | |
/* Parse the ADJUST_HEAD capability TLV.
 *
 * Reads the FW-advertised limits for bpf_xdp_adjust_head() into
 * bpf->adjust_head.  If the min/max offsets cannot be encoded as
 * immediates in the generated code the capability is cleared and the
 * helper is simply not offered, rather than failing probe.
 *
 * Return: 0 on success (including the disabled case), -EINVAL on a
 * malformed TLV.
 */
static int
nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
	struct nfp_cpp *cpp = bpf->app->pf->cpp;

	if (length < sizeof(*cap)) {
		nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->adjust_head.flags = readl(&cap->flags);
	bpf->adjust_head.off_min = readl(&cap->off_min);
	bpf->adjust_head.off_max = readl(&cap->off_max);
	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);

	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
		return -EINVAL;
	}
	/* Limits must fit in an unrestricted-register immediate */
	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
		return 0;
	}

	return 0;
}
| 272 | |
Jakub Kicinski | 9d080d5 | 2018-01-11 20:29:13 -0800 | [diff] [blame] | 273 | static int |
| 274 | nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) |
| 275 | { |
| 276 | struct nfp_bpf_cap_tlv_func __iomem *cap = value; |
| 277 | |
| 278 | if (length < sizeof(*cap)) { |
| 279 | nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length); |
| 280 | return -EINVAL; |
| 281 | } |
| 282 | |
| 283 | switch (readl(&cap->func_id)) { |
| 284 | case BPF_FUNC_map_lookup_elem: |
| 285 | bpf->helpers.map_lookup = readl(&cap->func_addr); |
| 286 | break; |
Jakub Kicinski | 44d65a4 | 2018-03-28 17:48:31 -0700 | [diff] [blame] | 287 | case BPF_FUNC_map_update_elem: |
| 288 | bpf->helpers.map_update = readl(&cap->func_addr); |
| 289 | break; |
Jakub Kicinski | bfee64d | 2018-03-28 17:48:32 -0700 | [diff] [blame] | 290 | case BPF_FUNC_map_delete_elem: |
| 291 | bpf->helpers.map_delete = readl(&cap->func_addr); |
| 292 | break; |
Jakub Kicinski | 9d080d5 | 2018-01-11 20:29:13 -0800 | [diff] [blame] | 293 | } |
| 294 | |
| 295 | return 0; |
| 296 | } |
| 297 | |
/* Parse the MAPS capability TLV - FW limits on offloaded BPF maps
 * (supported map types bitmask, max map/element counts and sizes).
 *
 * Return: 0 on success, -EINVAL if the TLV is shorter than expected.
 */
static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->maps.types = readl(&cap->types);
	bpf->maps.max_maps = readl(&cap->max_maps);
	bpf->maps.max_elems = readl(&cap->max_elems);
	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);

	return 0;
}
| 317 | |
/* Parse the RANDOM capability TLV.  It carries no payload - its mere
 * presence indicates the FW can service the pseudo-random helper.
 */
static int
nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
			 u32 length)
{
	bpf->pseudo_random = true;
	return 0;
}
| 325 | |
Jakub Kicinski | 77a844e | 2017-12-14 21:29:16 -0800 | [diff] [blame] | 326 | static int nfp_bpf_parse_capabilities(struct nfp_app *app) |
| 327 | { |
| 328 | struct nfp_cpp *cpp = app->pf->cpp; |
| 329 | struct nfp_cpp_area *area; |
| 330 | u8 __iomem *mem, *start; |
| 331 | |
| 332 | mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap", |
| 333 | 8, &area); |
| 334 | if (IS_ERR(mem)) |
| 335 | return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem); |
| 336 | |
| 337 | start = mem; |
| 338 | while (mem - start + 8 < nfp_cpp_area_size(area)) { |
Jakub Kicinski | 0d49eaf | 2017-12-14 21:29:18 -0800 | [diff] [blame] | 339 | u8 __iomem *value; |
Jakub Kicinski | 77a844e | 2017-12-14 21:29:16 -0800 | [diff] [blame] | 340 | u32 type, length; |
| 341 | |
| 342 | type = readl(mem); |
| 343 | length = readl(mem + 4); |
Jakub Kicinski | 0d49eaf | 2017-12-14 21:29:18 -0800 | [diff] [blame] | 344 | value = mem + 8; |
Jakub Kicinski | 77a844e | 2017-12-14 21:29:16 -0800 | [diff] [blame] | 345 | |
| 346 | mem += 8 + length; |
| 347 | if (mem - start > nfp_cpp_area_size(area)) |
| 348 | goto err_release_free; |
| 349 | |
| 350 | switch (type) { |
Jakub Kicinski | 9d080d5 | 2018-01-11 20:29:13 -0800 | [diff] [blame] | 351 | case NFP_BPF_CAP_TYPE_FUNC: |
| 352 | if (nfp_bpf_parse_cap_func(app->priv, value, length)) |
| 353 | goto err_release_free; |
| 354 | break; |
Jakub Kicinski | 0d49eaf | 2017-12-14 21:29:18 -0800 | [diff] [blame] | 355 | case NFP_BPF_CAP_TYPE_ADJUST_HEAD: |
| 356 | if (nfp_bpf_parse_cap_adjust_head(app->priv, value, |
| 357 | length)) |
| 358 | goto err_release_free; |
| 359 | break; |
Jakub Kicinski | 9d080d5 | 2018-01-11 20:29:13 -0800 | [diff] [blame] | 360 | case NFP_BPF_CAP_TYPE_MAPS: |
| 361 | if (nfp_bpf_parse_cap_maps(app->priv, value, length)) |
| 362 | goto err_release_free; |
| 363 | break; |
Jakub Kicinski | df4a37d | 2018-03-28 17:48:37 -0700 | [diff] [blame] | 364 | case NFP_BPF_CAP_TYPE_RANDOM: |
| 365 | if (nfp_bpf_parse_cap_random(app->priv, value, length)) |
| 366 | goto err_release_free; |
| 367 | break; |
Jakub Kicinski | 77a844e | 2017-12-14 21:29:16 -0800 | [diff] [blame] | 368 | default: |
| 369 | nfp_dbg(cpp, "unknown BPF capability: %d\n", type); |
| 370 | break; |
| 371 | } |
| 372 | } |
| 373 | if (mem - start != nfp_cpp_area_size(area)) { |
Jakub Kicinski | 0bce7c9 | 2017-12-15 10:39:31 -0800 | [diff] [blame] | 374 | nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n", |
Jakub Kicinski | 77a844e | 2017-12-14 21:29:16 -0800 | [diff] [blame] | 375 | mem - start, nfp_cpp_area_size(area)); |
| 376 | goto err_release_free; |
| 377 | } |
| 378 | |
| 379 | nfp_cpp_area_release_free(area); |
| 380 | |
| 381 | return 0; |
| 382 | |
| 383 | err_release_free: |
Jakub Kicinski | 0bce7c9 | 2017-12-15 10:39:31 -0800 | [diff] [blame] | 384 | nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start); |
Jakub Kicinski | 77a844e | 2017-12-14 21:29:16 -0800 | [diff] [blame] | 385 | nfp_cpp_area_release_free(area); |
| 386 | return -EINVAL; |
| 387 | } |
| 388 | |
| 389 | static int nfp_bpf_init(struct nfp_app *app) |
| 390 | { |
| 391 | struct nfp_app_bpf *bpf; |
| 392 | int err; |
| 393 | |
| 394 | bpf = kzalloc(sizeof(*bpf), GFP_KERNEL); |
| 395 | if (!bpf) |
| 396 | return -ENOMEM; |
| 397 | bpf->app = app; |
| 398 | app->priv = bpf; |
| 399 | |
Jakub Kicinski | d48ae23 | 2018-01-11 20:29:11 -0800 | [diff] [blame] | 400 | skb_queue_head_init(&bpf->cmsg_replies); |
| 401 | init_waitqueue_head(&bpf->cmsg_wq); |
Jakub Kicinski | 4da98ee | 2018-01-11 20:29:10 -0800 | [diff] [blame] | 402 | INIT_LIST_HEAD(&bpf->map_list); |
| 403 | |
Jakub Kicinski | 77a844e | 2017-12-14 21:29:16 -0800 | [diff] [blame] | 404 | err = nfp_bpf_parse_capabilities(app); |
| 405 | if (err) |
| 406 | goto err_free_bpf; |
| 407 | |
| 408 | return 0; |
| 409 | |
| 410 | err_free_bpf: |
| 411 | kfree(bpf); |
| 412 | return err; |
| 413 | } |
| 414 | |
/* Tear down the app-level BPF state.  By now all control message
 * replies must have been consumed and all offloaded maps freed.
 */
static void nfp_bpf_clean(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
	WARN_ON(!list_empty(&bpf->map_list));
	WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
	kfree(bpf);
}
| 424 | |
/* nfp_app callbacks for the eBPF offload application */
const struct nfp_app_type app_bpf = {
	.id		= NFP_APP_BPF_NIC,
	.name		= "ebpf",

	/* No optional control capabilities are used by this app */
	.ctrl_cap_mask	= 0,

	.init		= nfp_bpf_init,
	.clean		= nfp_bpf_clean,

	.check_mtu	= nfp_bpf_check_mtu,

	.extra_cap	= nfp_bpf_extra_cap,

	.vnic_alloc	= nfp_bpf_vnic_alloc,
	.vnic_free	= nfp_bpf_vnic_free,

	.ctrl_msg_rx	= nfp_bpf_ctrl_msg_rx,

	.setup_tc	= nfp_bpf_setup_tc,
	.bpf		= nfp_ndo_bpf,
	.xdp_offload	= nfp_bpf_xdp_offload,
};