/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "netdbpf/bpf_shared.h"

// These values are used by the cgroup BPF programs only.
#define BPF_PASS 1
#define BPF_DROP 0

// These values are used by the xt_bpf programs only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
#define IPPROTO_IHL_OFF 0  // the IPv4 IHL field lives in the low nibble of byte 0
#define TCP_FLAG_OFF 13    // offset of the flags byte within the TCP header
#define RST_OFFSET 2       // bit position of RST within that flags byte

DEFINE_BPF_MAP(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP(configuration_map, HASH, uint32_t, uint8_t, CONFIGURATION_MAP_SIZE)
DEFINE_BPF_MAP(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)

/* never actually used from eBPF */
DEFINE_BPF_MAP(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)

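// Note: DEFINE_BPF_MAP (from bpf_helpers.h) also generates typed accessors of
// the form bpf_<name>_lookup_elem() / bpf_<name>_update_elem(), which is what
// the lookup/update calls throughout this file refer to.
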
static __always_inline int is_system_uid(uint32_t uid) {
    return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
}

/*
 * Note: this blindly assumes an MTU of 1500, that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with the TCP timestamp option
 * enabled, which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using the TCP timestamp option
 * (and indeed the TCP timestamp option comes from RFC 1323, titled "TCP Extensions for
 * High Performance", which also defined TCP window scaling and is thus absolutely
 * ancient...).
 *
 * Altogether this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead).
 *
 * Especially since the number of packets is important for any future clat offload
 * correction (which adjusts upward by 20 bytes per packet to account for the
 * ipv4 -> ipv6 header conversion).
 */
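// Worked example of the adjustment below (illustrative numbers, not from the
// original source): for a 16384-byte IPv4 GSO skb, tcp_overhead = 20 (IP) +
// 20 (TCP) + 12 (timestamp option) = 52 bytes, mss = 1500 - 52 = 1448,
// payload = 16384 - 52 = 16332, packets = ceil(16332 / 1448) = 12, and the
// accounted bytes become 52 * 12 + 16332 = 16956 (rather than the raw 16384).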
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey) \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb, \
                                                              int direction, TypeOfKey* key) { \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key); \
        if (!value) { \
            StatsValue newValue = {}; \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST); \
            value = bpf_##the_stats_map##_lookup_elem(key); \
        } \
        if (value) { \
            /* Adjust the counts for GSO super-packets, per the comment above. */ \
            const int mtu = 1500; \
            uint64_t packets = 1; \
            uint64_t bytes = skb->len; \
            if (bytes > mtu) { \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6)); \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr)); \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12; \
                int mss = mtu - tcp_overhead; \
                uint64_t payload = bytes - tcp_overhead; \
                packets = (payload + mss - 1) / mss; \
                bytes = tcp_overhead * packets + payload; \
            } \
            if (direction == BPF_EGRESS) { \
                __sync_fetch_and_add(&value->txPackets, packets); \
                __sync_fetch_and_add(&value->txBytes, bytes); \
            } else if (direction == BPF_INGRESS) { \
                __sync_fetch_and_add(&value->rxPackets, packets); \
                __sync_fetch_and_add(&value->rxBytes, bytes); \
            } \
        } \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)

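// A sketch of what each invocation above generates (illustrative, not the
// literal preprocessor output): DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
// defines
//     static void update_app_uid_stats_map(struct __sk_buff* skb,
//                                          int direction, uint32_t* key);
// i.e. a per-map helper that looks up (creating it if absent) the StatsValue
// for *key and atomically adds the GSO-adjusted packet/byte counts to its
// tx or rx counters depending on direction.
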
// Skip owner matching for IPsec ESP packets and for TCP RSTs (which may not
// belong to any local socket).
static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl;
        uint16_t flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                // The IHL field gives the IPv4 header length in 32-bit words,
                // so ihl * 4 is the offset of the TCP header.
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                ihl = ihl & 0x0F;
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                // nexthdr == TCP means there are no extension headers, so the
                // TCP header immediately follows the fixed 40-byte IPv6 header.
                uint16_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

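// Example of the offset math above (illustrative): for a typical IPv4 header
// with no options, ihl = 5, so the TCP header starts at 5 * 4 = 20 bytes and
// the flags byte is at offset 20 + TCP_FLAG_OFF = 33; (flag >> RST_OFFSET) & 1
// then tests bit 2 of that byte, which is the RST flag.
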
static __always_inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read the configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if ((uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint8_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (enabledRules) {
        // DOZABLE and POWERSAVE are whitelists: drop unless the uid is listed.
        // STANDBY is a blacklist: drop if the uid is listed.
        if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
            return BPF_DROP;
        }
    }
    if (direction == BPF_INGRESS && (uidRules & IIF_MATCH)) {
        // Drop packets coming from neither lo (ifindex 1) nor the whitelisted interface.
        if (allowed_iif && skb->ifindex != 1 && skb->ifindex != allowed_iif) {
            return BPF_DROP;
        }
    }
    return BPF_PASS;
}

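// Example (illustrative): while Doze is active, enabledRules has DOZABLE_MATCH
// set, so a uid whose uid_owner_map entry lacks DOZABLE_MATCH (or that has no
// entry at all, i.e. uidRules == 0) gets BPF_DROP; a whitelisted uid instead
// falls through to the STANDBY and POWERSAVE checks.
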
static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                            StatsKey* key, uint8_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

    uint64_t cookie = bpf_get_socket_cookie(skb);
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint8_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!selectedMap) {
        return match;
    }

    // Tagged traffic is counted twice in the selected stats map: once under
    // (uid, tag) and once under (uid, 0), so untagged totals include it.
    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    return match;
}

SEC("cgroupskb/ingress/stats")
int bpf_cgroup_ingress(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS);
}

SEC("cgroupskb/egress/stats")
int bpf_cgroup_egress(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS);
}

SEC("skfilter/egress/xtbpf")
int xt_bpf_egress_prog(struct __sk_buff* skb) {
    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_EGRESS, &key);
    return BPF_MATCH;
}

SEC("skfilter/ingress/xtbpf")
int xt_bpf_ingress_prog(struct __sk_buff* skb) {
    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return BPF_MATCH;
}

SEC("skfilter/whitelist/xtbpf")
int xt_bpf_whitelist_prog(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // 65534 is the overflow 'nobody' uid; seeing it here usually means that
    // skb->sk is NULL during RX (early decap socket lookup failure), which
    // commonly happens for incoming packets to an unconnected udp socket.
    // Additionally, bpf_get_socket_cookie() returns 0 if skb->sk is NULL.
    if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
        return BPF_MATCH;

    UidOwnerValue* whitelistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (whitelistMatch) return whitelistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

SEC("skfilter/blacklist/xtbpf")
int xt_bpf_blacklist_prog(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* blacklistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (blacklistMatch) return blacklistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

DEFINE_BPF_MAP(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)

SEC("cgroupsock/inet/create")
int inet_socket_create(struct bpf_sock* sk) {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install-time permissions are granted to an app
     * for all users, so we only check the appId part of a requesting uid at
     * run time. See UserHandle#isSameApp for details.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % PER_USER_RANGE;
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    if (!permissions) {
        // UID not in map. Default to just INTERNET permission.
        return 1;
    }

    // A return value of 1 means allow, everything else means deny.
    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
}
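
// Example of the appId math above (illustrative; assumes the platform's usual
// PER_USER_RANGE of 100000): uid 1010123 (user 10, app 10123) yields
// appId = 1010123 % 100000 = 10123, the same value that user 0 would get for
// uid 10123, so one map entry covers the app across all users.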

char _license[] SEC("license") = "Apache 2.0";