blob: 3bede8db938db13ed10700800e1178b5fd038b64 [file] [log] [blame]
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
/*
 * This header file, together with netd.c, is used for compiling the eBPF
 * kernel program that performs per-uid network traffic accounting and
 * firewall-style uid filtering.
 */
21
22#include <bpf_helpers.h>
23#include <linux/bpf.h>
24#include <linux/if.h>
25#include <linux/if_ether.h>
26#include <linux/in.h>
27#include <linux/in6.h>
28#include <linux/ip.h>
29#include <linux/ipv6.h>
30#include <stdbool.h>
31#include <stdint.h>
32#include "netdbpf/bpf_shared.h"
33
// Value type of cookie_tag_map: identifies the socket's owner and the
// qtaguid-style tag applied to it (0 when untagged).
struct uid_tag {
    uint32_t uid;  // owning application uid
    uint32_t tag;  // traffic tag set on the socket
};
38
// Key type of stats_map_A/stats_map_B: one counter row per
// (uid, tag, counterSet, interface) combination.
struct stats_key {
    uint32_t uid;         // application uid the traffic is attributed to
    uint32_t tag;         // socket tag (0 for the per-uid aggregate row)
    uint32_t counterSet;  // counter set index from uid_counterset_map
    uint32_t ifaceIndex;  // kernel interface index (skb->ifindex)
};
45
// Packet/byte counters for both directions; fields are updated with atomic
// adds (__sync_fetch_and_add) from the bpf programs.
struct stats_value {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
};
52
// Value type of iface_index_name_map: the interface name for an ifindex.
struct IfaceValue {
    char name[IFNAMSIZ];
};
56
// Verdicts returned by the cgroup skb filter programs only.
#define BPF_PASS 1
#define BPF_DROP 0

// Verdicts returned by the xt_bpf (netfilter match) programs only.
#define BPF_NOMATCH 0
#define BPF_MATCH 1

// Traffic direction, passed as the `direction` argument below.
#define BPF_EGRESS 0
#define BPF_INGRESS 1

// Byte offset of the L4 protocol field within the IPv4 / IPv6 header.
#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
// The IPv4 IHL nibble lives in the first byte of the header.
#define IPPROTO_IHL_OFF 0
// Byte offset of the TCP flags byte within the TCP header; the RST flag is
// bit RST_OFFSET of that byte.
#define TCP_FLAG_OFF 13
#define RST_OFFSET 2
73
// Socket cookie (uint64_t) -> {uid, tag} for tagged sockets.
struct bpf_map_def SEC("maps") cookie_tag_map = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(uint64_t),
    .value_size = sizeof(struct uid_tag),
    .max_entries = COOKIE_UID_MAP_SIZE,
};
80
// uid (uint32_t) -> active counter set index (uint8_t) for that uid.
struct bpf_map_def SEC("maps") uid_counterset_map = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(uint32_t),
    .value_size = sizeof(uint8_t),
    .max_entries = UID_COUNTERSET_MAP_SIZE,
};
87
// uid (uint32_t) -> aggregate traffic counters for that uid across all
// interfaces and tags.
struct bpf_map_def SEC("maps") app_uid_stats_map = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(uint32_t),
    .value_size = sizeof(struct stats_value),
    .max_entries = APP_STATS_MAP_SIZE,
};
94
// Detailed stats, bucket A of the double-buffered pair selected via
// configuration_map (see update_stats_with_config).
struct bpf_map_def SEC("maps") stats_map_A = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(struct stats_key),
    .value_size = sizeof(struct stats_value),
    .max_entries = STATS_MAP_SIZE,
};
101
// Detailed stats, bucket B of the double-buffered pair selected via
// configuration_map (see update_stats_with_config).
struct bpf_map_def SEC("maps") stats_map_B = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(struct stats_key),
    .value_size = sizeof(struct stats_value),
    .max_entries = STATS_MAP_SIZE,
};
108
// Interface index (uint32_t) -> per-interface traffic counters.
struct bpf_map_def SEC("maps") iface_stats_map = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(uint32_t),
    .value_size = sizeof(struct stats_value),
    .max_entries = IFACE_STATS_MAP_SIZE,
};
115
// Configuration key (uint32_t) -> setting byte. Holds at least the uid-rules
// bitmask (UID_RULES_CONFIGURATION_KEY) and the live stats-map selector
// (CURRENT_STATS_MAP_CONFIGURATION_KEY).
struct bpf_map_def SEC("maps") configuration_map = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(uint32_t),
    .value_size = sizeof(uint8_t),
    .max_entries = CONFIGURATION_MAP_SIZE,
};
122
// uid (uint32_t) -> firewall rule bits and whitelisted ingress ifindex
// (struct UidOwnerValue, declared in netdbpf/bpf_shared.h).
struct bpf_map_def SEC("maps") uid_owner_map = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(uint32_t),
    .value_size = sizeof(struct UidOwnerValue),
    .max_entries = UID_OWNER_MAP_SIZE,
};
129
// Interface index (uint32_t) -> interface name (struct IfaceValue).
struct bpf_map_def SEC("maps") iface_index_name_map = {
    .type = BPF_MAP_TYPE_HASH,
    .key_size = sizeof(uint32_t),
    .value_size = sizeof(struct IfaceValue),
    .max_entries = IFACE_INDEX_NAME_MAP_SIZE,
};
136
137static __always_inline int is_system_uid(uint32_t uid) {
138 return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
139}
140
141static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, struct bpf_map_def* map,
142 int direction, void* key) {
143 struct stats_value* value;
144 value = bpf_map_lookup_elem(map, key);
145 if (!value) {
146 struct stats_value newValue = {};
147 bpf_map_update_elem(map, key, &newValue, BPF_NOEXIST);
148 value = bpf_map_lookup_elem(map, key);
149 }
150 if (value) {
151 if (direction == BPF_EGRESS) {
152 __sync_fetch_and_add(&value->txPackets, 1);
153 __sync_fetch_and_add(&value->txBytes, skb->len);
154 } else if (direction == BPF_INGRESS) {
155 __sync_fetch_and_add(&value->rxPackets, 1);
156 __sync_fetch_and_add(&value->rxBytes, skb->len);
157 }
158 }
159}
160
161static inline bool skip_owner_match(struct __sk_buff* skb) {
162 int offset = -1;
163 int ret = 0;
164 if (skb->protocol == ETH_P_IP) {
165 offset = IP_PROTO_OFF;
166 uint8_t proto, ihl;
167 uint16_t flag;
168 ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
169 if (!ret) {
170 if (proto == IPPROTO_ESP) {
171 return true;
172 } else if (proto == IPPROTO_TCP) {
173 ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
174 ihl = ihl & 0x0F;
175 ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
176 if (ret == 0 && (flag >> RST_OFFSET & 1)) {
177 return true;
178 }
179 }
180 }
181 } else if (skb->protocol == ETH_P_IPV6) {
182 offset = IPV6_PROTO_OFF;
183 uint8_t proto;
184 ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
185 if (!ret) {
186 if (proto == IPPROTO_ESP) {
187 return true;
188 } else if (proto == IPPROTO_TCP) {
189 uint16_t flag;
190 ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
191 if (ret == 0 && (flag >> RST_OFFSET & 1)) {
192 return true;
193 }
194 }
195 }
196 }
197 return false;
198}
199
200static __always_inline BpfConfig getConfig(uint32_t configKey) {
201 uint32_t mapSettingKey = configKey;
202 BpfConfig* config = bpf_map_lookup_elem(&configuration_map, &mapSettingKey);
203 if (!config) {
204 // Couldn't read configuration entry. Assume everything is disabled.
205 return DEFAULT_CONFIG;
206 }
207 return *config;
208}
209
Rubin Xuec27ff22019-01-08 21:33:03 +0000210static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direction) {
Chenbo Feng2236e1b2019-02-26 14:30:19 -0800211 if (skip_owner_match(skb)) return BPF_PASS;
212
213 if ((uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID)) return BPF_PASS;
214
215 BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);
Chenbo Feng2236e1b2019-02-26 14:30:19 -0800216
Rubin Xuec27ff22019-01-08 21:33:03 +0000217 struct UidOwnerValue* uidEntry = bpf_map_lookup_elem(&uid_owner_map, &uid);
218 uint8_t uidRules = uidEntry ? uidEntry->rule : 0;
219 uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;
220
221 if (enabledRules) {
222 if ((enabledRules & DOZABLE_MATCH) && !(uidRules & DOZABLE_MATCH)) {
223 return BPF_DROP;
224 }
225 if ((enabledRules & STANDBY_MATCH) && (uidRules & STANDBY_MATCH)) {
226 return BPF_DROP;
227 }
228 if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
229 return BPF_DROP;
230 }
Chenbo Feng2236e1b2019-02-26 14:30:19 -0800231 }
Rubin Xuec27ff22019-01-08 21:33:03 +0000232 if (direction == BPF_INGRESS && (uidRules & IIF_MATCH)) {
233 // Drops packets not coming from lo nor the whitelisted interface
234 if (allowed_iif && skb->ifindex != 1 && skb->ifindex != allowed_iif) {
235 return BPF_DROP;
236 }
Chenbo Feng2236e1b2019-02-26 14:30:19 -0800237 }
238 return BPF_PASS;
239}
240
241static __always_inline inline void update_stats_with_config(struct __sk_buff* skb, int direction,
242 void* key, uint8_t selectedMap) {
243 if (selectedMap == SELECT_MAP_A) {
244 bpf_update_stats(skb, &stats_map_A, direction, key);
245 } else if (selectedMap == SELECT_MAP_B) {
246 bpf_update_stats(skb, &stats_map_B, direction, key);
247 }
248}
249
// Core accounting routine for the cgroup skb programs: decides the firewall
// verdict for this packet and, unless it is a dropped egress packet, charges
// it to the detailed stats maps and the per-app aggregate map.
// Returns the verdict from bpf_owner_match (BPF_PASS / BPF_DROP).
static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    int match = bpf_owner_match(skb, sock_uid, direction);
    if ((direction == BPF_EGRESS) && (match == BPF_DROP)) {
        // If an outbound packet is going to be dropped, we do not count that
        // traffic.
        return match;
    }

    // Prefer the (uid, tag) recorded for this socket's cookie; fall back to
    // the socket uid with tag 0 when the socket was never tagged.
    uint64_t cookie = bpf_get_socket_cookie(skb);
    struct uid_tag* utag = bpf_map_lookup_elem(&cookie_tag_map, &cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    struct stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    // Use the uid's active counter set if one has been configured.
    uint8_t* counterSet = bpf_map_lookup_elem(&uid_counterset_map, &uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    // Which of stats_map_A/B is live right now; without a selector entry we
    // cannot account the detailed stats, so only the verdict is returned.
    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint8_t* selectedMap = bpf_map_lookup_elem(&configuration_map, &mapSettingKey);
    if (!selectedMap) {
        return match;
    }

    // Tagged traffic is recorded twice: once under its tag and once in the
    // uid's untagged (tag == 0) row.
    if (tag) {
        update_stats_with_config(skb, direction, &key, *selectedMap);
    }

    key.tag = 0;
    update_stats_with_config(skb, direction, &key, *selectedMap);
    bpf_update_stats(skb, &app_uid_stats_map, direction, &uid);
    return match;
}