/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>

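/* Run the program once, in the context BPF programs expect: preemption
 * disabled and under rcu_read_lock(), mirroring how the networking fast
 * path invokes them.
 */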
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx)
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

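/* Run the program @repeat times (at least once) and report the mean
 * runtime per invocation in nanoseconds via @time.  The clock is stopped
 * around cond_resched() so time spent scheduled out does not inflate the
 * measurement, and a pending signal aborts the loop early.
 */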
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	return ret;
}

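/* Copy the (possibly modified) packet and the run's results back to the
 * userspace bpf_attr: the output data, its size, the program's return
 * value and the measured duration.
 */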
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;

	if (data_out && copy_to_user(data_out, data, size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	err = 0;
out:
	return err;
}

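/* Allocate a test buffer with the requested head- and tailroom and fill
 * it with the user-supplied packet.  The input must be at least one
 * Ethernet header long and, including the extra room, fit in one page.
 */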
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

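/* BPF_PROG_TEST_RUN handler for skb-based program types: build an skb
 * around the user-supplied packet, run the program against it on the
 * loopback device of the caller's netns, then copy the resulting packet
 * and run statistics back to userspace.
 */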
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

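	/* tc (sched_cls/sched_act) programs run with the MAC header in
	 * scope; both they and the LWT types get direct packet access,
	 * so the skb's data pointers must be computed before the run.
	 */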
	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
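	/* For non-L2 programs the MAC header was pulled by
	 * eth_type_trans() above, and the program may also have consumed
	 * headroom (e.g. via bpf_skb_change_head()); regrow the headroom
	 * if needed and push a zeroed L2 header back so userspace gets a
	 * complete frame.
	 */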
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	return ret;
}

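/* BPF_PROG_TEST_RUN handler for XDP programs: run the program against
 * an xdp_buff built around the user-supplied packet, with queue 0 of
 * the loopback device in the caller's netns as the receive queue.
 */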
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
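	/* The program may have moved data or data_end (e.g. via
	 * bpf_xdp_adjust_head() or bpf_xdp_adjust_tail()); recompute the
	 * output size if so.
	 */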
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}
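
/* Illustrative userspace sketch (not part of this file): how a test
 * harness might drive the handlers above through the BPF_PROG_TEST_RUN
 * command of the bpf(2) syscall.  "prog_fd" is assumed to be an
 * already-loaded skb-type program; error handling is elided, and the
 * output buffer must be large enough for the resulting packet.
 *
 *	union bpf_attr attr = {};
 *	unsigned char in[128] = {};	// raw Ethernet frame to test
 *	unsigned char out[4096];	// receives the modified packet
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)in;
 *	attr.test.data_size_in = sizeof(in);
 *	attr.test.data_out = (__u64)(unsigned long)out;
 *	attr.test.repeat = 1000;
 *
 *	if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) == 0)
 *		printf("retval=%u duration=%uns out_size=%u\n",
 *		       attr.test.retval, attr.test.duration,
 *		       attr.test.data_size_out);
 */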