blob: 8fb6046f45db05746025dd11edb69f1131a5d5a8 [file] [log] [blame]
Chenbo Feng36575d02018-02-05 15:19:15 -08001/*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <linux/bpf.h>
Chenbo Feng89c12f12018-03-21 10:29:18 -070018#include <linux/if_ether.h>
19#include <linux/in.h>
20#include <linux/in6.h>
21#include <linux/ip.h>
22#include <linux/ipv6.h>
Chenbo Feng36575d02018-02-05 15:19:15 -080023#include <stdint.h>
Chenbo Feng89c12f12018-03-21 10:29:18 -070024#include "bpf/bpf_shared.h"
Chenbo Feng36575d02018-02-05 15:19:15 -080025
26#define ELF_SEC(NAME) __attribute__((section(NAME), used))
27
/*
 * Value stored in COOKIE_TAG_MAP: the uid and qtaguid tag that userspace has
 * associated with a socket cookie. Layout must match the userspace definition
 * in bpf_shared.h consumers — do not reorder or resize fields.
 */
struct uid_tag {
    uint32_t uid;  // owner uid of the socket
    uint32_t tag;  // qtaguid tag applied to the socket (0 = untagged)
};
32
/*
 * Key for the per-uid / per-tag stats maps (UID_STATS_MAP, TAG_STATS_MAP).
 * Layout is shared with userspace readers — do not reorder or resize fields.
 */
struct stats_key {
    uint32_t uid;         // socket owner uid
    uint32_t tag;         // qtaguid tag (0 for the untagged/uid-level entry)
    uint32_t counterSet;  // active counter set for this uid (from UID_COUNTERSET_MAP)
    uint32_t ifaceIndex;  // ifindex of the interface the packet traversed
};
39
/*
 * Value for the stats maps: cumulative packet/byte counters, updated with
 * atomic adds in bpf_update_stats(). Layout is shared with userspace readers.
 */
struct stats_value {
    uint64_t rxPackets;
    uint64_t rxBytes;
    uint64_t txPackets;
    uint64_t txBytes;
};
46
47/* helper functions called from eBPF programs written in C */
/* helper functions called from eBPF programs written in C */
/*
 * Each declaration binds a local function pointer to a kernel BPF helper id
 * (BPF_FUNC_*); at run time the in-kernel verifier/JIT resolves the calls.
 * The 'map' arguments are map identifiers (see bpf_shared.h), not pointers.
 */
static void* (*find_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_lookup_elem;
static int (*write_to_map_entry)(uint64_t map, void* key, void* value,
                                 uint64_t flags) = (void*)BPF_FUNC_map_update_elem;
static int (*delete_map_entry)(uint64_t map, void* key) = (void*)BPF_FUNC_map_delete_elem;
static uint64_t (*get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
static uint32_t (*get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;
static int (*bpf_skb_load_bytes)(struct __sk_buff* skb, int off, void* to,
                                 int len) = (void*)BPF_FUNC_skb_load_bytes;
Chenbo Feng36575d02018-02-05 15:19:15 -080056#define BPF_PASS 1
57#define BPF_DROP 0
Chenbo Feng5ed17992018-03-13 21:30:49 -070058#define BPF_EGRESS 0
59#define BPF_INGRESS 1
60
Chenbo Feng89c12f12018-03-21 10:29:18 -070061#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
62#define IPV6_PROTO_OFF offsetof(struct ipv6hdr, nexthdr)
63#define IPPROTO_IHL_OFF 0
64#define TCP_FLAG_OFF 13
65#define RST_OFFSET 2
66
Chenbo Fenga8317b42018-04-02 12:34:45 -070067static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, uint64_t map,
68 int direction, void *key) {
Chenbo Feng5ed17992018-03-13 21:30:49 -070069 struct stats_value* value;
Chenbo Fenga8317b42018-04-02 12:34:45 -070070 value = find_map_entry(map, key);
Chenbo Feng5ed17992018-03-13 21:30:49 -070071 if (!value) {
72 struct stats_value newValue = {};
Chenbo Fenga8317b42018-04-02 12:34:45 -070073 write_to_map_entry(map, key, &newValue, BPF_NOEXIST);
74 value = find_map_entry(map, key);
Chenbo Feng5ed17992018-03-13 21:30:49 -070075 }
76 if (value) {
Chenbo Fenga8317b42018-04-02 12:34:45 -070077 if (direction == BPF_EGRESS) {
Chenbo Feng5ed17992018-03-13 21:30:49 -070078 __sync_fetch_and_add(&value->txPackets, 1);
79 __sync_fetch_and_add(&value->txBytes, skb->len);
Chenbo Fenga8317b42018-04-02 12:34:45 -070080 } else if (direction == BPF_INGRESS) {
Chenbo Feng5ed17992018-03-13 21:30:49 -070081 __sync_fetch_and_add(&value->rxPackets, 1);
82 __sync_fetch_and_add(&value->rxBytes, skb->len);
83 }
84 }
Chenbo Feng2cc3d6a2018-03-06 02:13:51 -080085}
86
Chenbo Feng89c12f12018-03-21 10:29:18 -070087static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid) {
88 int offset = -1;
89 int ret = 0;
90 if (skb->protocol == ETH_P_IP) {
91 offset = IP_PROTO_OFF;
92 uint8_t proto, ihl;
93 uint16_t flag;
94 ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
95 if (!ret) {
96 if (proto == IPPROTO_ESP) {
97 return 1;
98 } else if (proto == IPPROTO_TCP) {
99 ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1);
100 ihl = ihl & 0x0F;
101 ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1);
102 if (ret == 0 && (flag >> RST_OFFSET & 1)) {
103 return BPF_PASS;
104 }
105 }
106 }
107 } else if (skb->protocol == ETH_P_IPV6) {
108 offset = IPV6_PROTO_OFF;
109 uint8_t proto;
110 ret = bpf_skb_load_bytes(skb, offset, &proto, 1);
111 if (!ret) {
112 if (proto == IPPROTO_ESP) {
113 return BPF_PASS;
114 } else if (proto == IPPROTO_TCP) {
115 uint16_t flag;
116 ret = bpf_skb_load_bytes(skb, sizeof(struct ipv6hdr) + TCP_FLAG_OFF, &flag, 1);
117 if (ret == 0 && (flag >> RST_OFFSET & 1)) {
118 return BPF_PASS;
119 }
120 }
121 }
122 }
123
124 if ((uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID)) return BPF_PASS;
125
126 // In each of these maps, the entry with key UID_MAP_ENABLED tells us whether that
127 // map is enabled or not.
128 // TODO: replace this with a map of size one that contains a config structure defined in
129 // bpf_shared.h that can be written by userspace and read here.
130 uint32_t mapSettingKey = UID_MAP_ENABLED;
131 uint8_t* ownerMatch;
132 uint8_t* mapEnabled = find_map_entry(DOZABLE_UID_MAP, &mapSettingKey);
133 if (mapEnabled && *mapEnabled) {
134 ownerMatch = find_map_entry(DOZABLE_UID_MAP, &uid);
135 if (ownerMatch) return *ownerMatch;
136 return BPF_DROP;
137 }
138 mapEnabled = find_map_entry(STANDBY_UID_MAP, &mapSettingKey);
139 if (mapEnabled && *mapEnabled) {
140 ownerMatch = find_map_entry(STANDBY_UID_MAP, &uid);
141 if (ownerMatch) return *ownerMatch;
142 }
143 mapEnabled = find_map_entry(POWERSAVE_UID_MAP, &mapSettingKey);
144 if (mapEnabled && *mapEnabled) {
145 ownerMatch = find_map_entry(POWERSAVE_UID_MAP, &uid);
146 if (ownerMatch) return *ownerMatch;
147 return BPF_DROP;
148 }
149 return BPF_PASS;
150}
151
Chenbo Feng2cc3d6a2018-03-06 02:13:51 -0800152static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int direction) {
153 uint64_t cookie = get_socket_cookie(skb);
154 struct uid_tag* utag = find_map_entry(COOKIE_TAG_MAP, &cookie);
155 uint32_t uid, tag;
156 if (utag) {
157 uid = utag->uid;
158 tag = utag->tag;
159 } else {
160 uid = get_socket_uid(skb);
161 tag = 0;
162 }
163
164 struct stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};
165
166 uint32_t* counterSet;
167 counterSet = find_map_entry(UID_COUNTERSET_MAP, &uid);
168 if (counterSet) key.counterSet = *counterSet;
169
170 int ret;
171 if (tag) {
Chenbo Fenga8317b42018-04-02 12:34:45 -0700172 bpf_update_stats(skb, TAG_STATS_MAP, direction, &key);
Chenbo Feng2cc3d6a2018-03-06 02:13:51 -0800173 }
174
175 key.tag = 0;
Chenbo Fenga8317b42018-04-02 12:34:45 -0700176 bpf_update_stats(skb, UID_STATS_MAP, direction, &key);
Chenbo Feng89c12f12018-03-21 10:29:18 -0700177 return bpf_owner_match(skb, uid);
Chenbo Feng2cc3d6a2018-03-06 02:13:51 -0800178}