/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "netdbpf/bpf_shared.h"

typedef struct {
    uint32_t uid;
    uint32_t tag;
} uid_tag;

typedef struct {
    uint32_t uid;
    uint32_t tag;
    uint32_t counterSet;
    uint32_t ifaceIndex;
} stats_key;

typedef struct {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
} stats_value;

typedef struct {
    char name[IFNAMSIZ];
} IfaceValue;

// These verdicts are used by the cgroup bpf filters only.
#define BPF_PASS 1
#define BPF_DROP 0

// These verdicts are used by the xt_bpf programs only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

#define BPF_EGRESS 0
#define BPF_INGRESS 1

#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
#define IPPROTO_IHL_OFF 0
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2
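// TCP_FLAG_OFF is the byte offset of the flags octet within the TCP header;
// within that octet, bit RST_OFFSET is the RST flag (FIN=0, SYN=1, RST=2).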

DEFINE_BPF_MAP(cookie_tag_map, HASH, uint64_t, uid_tag, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP(app_uid_stats_map, HASH, uint32_t, stats_value, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP(stats_map_A, HASH, stats_key, stats_value, STATS_MAP_SIZE)
DEFINE_BPF_MAP(stats_map_B, HASH, stats_key, stats_value, STATS_MAP_SIZE)
DEFINE_BPF_MAP(iface_stats_map, HASH, uint32_t, stats_value, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP(configuration_map, HASH, uint32_t, uint8_t, CONFIGURATION_MAP_SIZE)
DEFINE_BPF_MAP(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
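// Note: stats_map_A and stats_map_B act as a double buffer. Userspace flips
// the live map via CURRENT_STATS_MAP_CONFIGURATION_KEY in configuration_map,
// then drains and clears the now-idle copy without racing the fast path.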

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_ACCESSORS(iface_index_name_map, HASH, uint32_t, IfaceValue,
                            IFACE_INDEX_NAME_MAP_SIZE)

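// [MIN_SYSTEM_UID, MAX_SYSTEM_UID] covers Android's reserved AID range
// (conventionally 1000-9999), i.e. platform daemons rather than apps.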
static __always_inline int is_system_uid(uint32_t uid) {
    return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
}

#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                          \
    static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb,          \
                                                              int direction, TypeOfKey* key) { \
        stats_value* value = bpf_##the_stats_map##_lookup_elem(key);                           \
        if (!value) {                                                                          \
            stats_value newValue = {};                                                         \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                    \
            value = bpf_##the_stats_map##_lookup_elem(key);                                    \
        }                                                                                      \
        if (value) {                                                                           \
            if (direction == BPF_EGRESS) {                                                     \
                __sync_fetch_and_add(&value->txPackets, 1);                                    \
                __sync_fetch_and_add(&value->txBytes, skb->len);                               \
            } else if (direction == BPF_INGRESS) {                                             \
                __sync_fetch_and_add(&value->rxPackets, 1);                                    \
                __sync_fetch_and_add(&value->rxBytes, skb->len);                               \
            }                                                                                  \
        }                                                                                      \
    }
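// The lookup / BPF_NOEXIST insert / re-lookup sequence above is the usual
// lock-free "create if missing" idiom: if two CPUs race, one insert loses
// with EEXIST, both re-lookups find the same entry, and the counters stay
// coherent because they are only modified via atomic __sync_fetch_and_add.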

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, stats_key)
DEFINE_UPDATE_STATS(stats_map_B, stats_key)

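// Owner matching is skipped for packets that cannot reliably be attributed to
// an owning application socket: IPsec ESP traffic, and TCP segments with the
// RST flag set (which the kernel may generate with no socket at all).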
static inline bool skip_owner_match(struct __sk_buff* skb) {
    int offset = -1;
    int ret = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        offset = IP_PROTO_OFF;
        uint8_t proto, ihl, flag;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
                if (ret) return false;
                ihl = ihl & 0x0F;
                // The TCP flags octet sits TCP_FLAG_OFF bytes into the TCP
                // header, which starts after the ihl*4-byte IP header.
                ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        offset = IPV6_PROTO_OFF;
        uint8_t proto;
        ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
        if (!ret) {
            if (proto == IPPROTO_ESP) {
                return true;
            } else if (proto == IPPROTO_TCP) {
                uint8_t flag;
                ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
                if (ret == 0 && (flag >> RST_OFFSET & 1)) {
                    return true;
                }
            }
        }
    }
    return false;
}

static __always_inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}
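// Rule semantics below: DOZABLE_MATCH and POWERSAVE_MATCH are allowlist
// chains (drop unless the uid carries the bit), while STANDBY_MATCH is a
// denylist chain (drop when the bit is set). IIF_MATCH additionally restricts
// ingress to loopback plus one permitted interface.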
static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
    if (skip_owner_match(skb)) return BPF_PASS;

    if ((uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID)) return BPF_PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint8_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (enabledRules) {
        if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
            return BPF_DROP;
        }
        if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
            return BPF_DROP;
        }
    }
    if (direction == BPF_INGRESS && (uidRules & IIF_MATCH)) {
        // Drop packets that arrive neither on loopback (ifindex 1) nor on the
        // whitelisted interface.
        if (allowed_iif && skb->ifindex != 1 && skb->ifindex != allowed_iif) {
            return BPF_DROP;
        }
    }
    return BPF_PASS;
}

static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
                                                            stats_key* key, uint8_t selectedMap) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, direction, key);
    } else if (selectedMap == SELECT_MAP_B) {
        update_stats_map_B(skb, direction, key);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

    uint64_t cookie = bpf_get_socket_cookie(skb);
    uid_tag* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint8_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!selectedMap) {
        return match;
    }

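    // Tagged traffic is accounted twice on purpose: once under (uid, tag) and
    // once under (uid, 0), so the tag-0 entry is the per-UID total (matching
    // the legacy xt_qtaguid accounting model).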
    if (key.tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
        key.tag = 0;
    }

    update_stats_with_config(skb, direction, &key, *selectedMap);
    update_app_uid_stats_map(skb, direction, &uid);
    return match;
}

SEC("cgroupskb/ingress/stats")
int bpf_cgroup_ingress(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_INGRESS);
}

SEC("cgroupskb/egress/stats")
int bpf_cgroup_egress(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, BPF_EGRESS);
}
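// The two programs below are attached via iptables' xt_bpf match. They always
// return BPF_MATCH; they exist purely for the side effect of updating the
// per-interface counters in iface_stats_map.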
SEC("skfilter/egress/xtbpf")
int xt_bpf_egress_prog(struct __sk_buff* skb) {
    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_EGRESS, &key);
    return BPF_MATCH;
}

SEC("skfilter/ingress/xtbpf")
int xt_bpf_ingress_prog(struct __sk_buff* skb) {
    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, BPF_INGRESS, &key);
    return BPF_MATCH;
}

SEC("skfilter/whitelist/xtbpf")
int xt_bpf_whitelist_prog(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // 65534 is the overflow 'nobody' uid: seeing it here usually means that
    // skb->sk is NULL during RX (early decap socket lookup failure), which
    // commonly happens for incoming packets to an unconnected udp socket.
    // Additionally, bpf_get_socket_cookie() returns 0 if skb->sk is NULL.
    if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
        return BPF_MATCH;

    UidOwnerValue* whitelistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (whitelistMatch) return whitelistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

SEC("skfilter/blacklist/xtbpf")
int xt_bpf_blacklist_prog(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* blacklistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (blacklistMatch) return blacklistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

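// Keyed by appId (uid % PER_USER_RANGE) rather than by full uid; see
// inet_socket_create() below.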
DEFINE_BPF_MAP(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)

SEC("cgroupsock/inet/create")
int inet_socket_create(struct bpf_sock* sk) {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install-time permissions are granted to an
     * app for all users, so we only check the appId part of the requesting uid
     * at run time. See UserHandle#isSameApp for details.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % PER_USER_RANGE;
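    // bpf_get_current_uid_gid() packs the gid in the upper 32 bits and the
    // uid in the lower 32. Worked example, assuming Android's usual
    // PER_USER_RANGE of 100000: uid 1010123 (user 10, app 10123) yields
    // appId 10123, the same appId as uid 10123 under user 0.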
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    if (!permissions) {
        // UID not in map. Default to just INTERNET permission.
        return 1;
    }

    // A return value of 1 means allow, everything else means deny.
    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
}

char _license[] SEC("license") = "Apache 2.0";