/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <linux/if.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>

#include "bpf_helpers.h"
#include "bpf_net_helpers.h"
#include "netdbpf/bpf_shared.h"

DEFINE_BPF_MAP_GRW(tether_ingress_map, HASH, TetherIngressKey, TetherIngressValue, 64,
                   AID_NETWORK_STACK)

// Tethering stats, indexed by upstream interface.
DEFINE_BPF_MAP_GRW(tether_stats_map, HASH, uint32_t, TetherStatsValue, 16, AID_NETWORK_STACK)

// Tethering data limit, indexed by upstream interface.
// (tethering allowed when stats[iif].rxBytes + stats[iif].txBytes < limit[iif])
DEFINE_BPF_MAP_GRW(tether_limit_map, HASH, uint32_t, uint64_t, 16, AID_NETWORK_STACK)

static inline __always_inline int do_forward(struct __sk_buff* skb, bool is_ethernet) {
    int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
    void* data = (void*)(long)skb->data;
    const void* data_end = (void*)(long)skb->data_end;
    struct ethhdr* eth = is_ethernet ? data : NULL;  // used iff is_ethernet
    struct ipv6hdr* ip6 = is_ethernet ? (void*)(eth + 1) : data;

    // Must be meta-ethernet IPv6 frame
    if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_OK;

    // Must have (ethernet and) ipv6 header
    if (data + l2_header_size + sizeof(*ip6) > data_end) return TC_ACT_OK;

    // Ethertype - if present - must be IPv6
    if (is_ethernet && (eth->h_proto != htons(ETH_P_IPV6))) return TC_ACT_OK;

    // IP version must be 6
    if (ip6->version != 6) return TC_ACT_OK;

    // Cannot decrement the hop limit during forwarding if it is already zero or would become zero;
    // let the kernel's stack handle these cases and generate the appropriate ICMPv6 errors.
    if (ip6->hop_limit <= 1) return TC_ACT_OK;

    // Protect against forwarding packets sourced from ::1 or fe80::/64 or other weirdness.
    __be32 src32 = ip6->saddr.s6_addr32[0];
    if (src32 != htonl(0x0064ff9b) &&                        // 64:ff9b::/32 incl. 464xlat WKP
        (src32 & htonl(0xe0000000)) != htonl(0x20000000))    // 2000::/3 Global Unicast
        return TC_ACT_OK;
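
    // For example: a packet sourced from 2001:db8::1 has first 32 bits 0x20010db8,
    // and 0x20010db8 & 0xe0000000 == 0x20000000, so it is forwarded; one sourced
    // from fe80::1 has first 32 bits 0xfe800000, and 0xfe800000 & 0xe0000000 ==
    // 0xe0000000 != 0x20000000, so it is punted back to the kernel's stack.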

    TetherIngressKey k = {
            .iif = skb->ifindex,
            .neigh6 = ip6->daddr,
    };

    TetherIngressValue* v = bpf_tether_ingress_map_lookup_elem(&k);

    // If we don't find any offload information then simply let the core stack handle it...
    if (!v) return TC_ACT_OK;

    uint32_t stat_and_limit_k = skb->ifindex;

    TetherStatsValue* stat_v = bpf_tether_stats_map_lookup_elem(&stat_and_limit_k);

    // If we don't have anywhere to put stats, then abort...
    if (!stat_v) return TC_ACT_OK;

    uint64_t* limit_v = bpf_tether_limit_map_lookup_elem(&stat_and_limit_k);

    // If we don't have a limit, then abort...
    if (!limit_v) return TC_ACT_OK;

    // The required IPv6 minimum mtu is 1280; below that it's not clear what we should do, abort...
    const int pmtu = v->pmtu;
    if (pmtu < IPV6_MIN_MTU) return TC_ACT_OK;

    // Approximate handling of TCP/IPv6 overhead for incoming LRO/GRO packets: the default
    // outbound path mtu of 1500 is not necessarily correct, but worst case we simply
    // undercount, which is still better than not accounting for this overhead at all.
    // Note: this really shouldn't be device/path mtu at all, but rather should be
    // derived from this particular connection's mss (ie. from the gro segment size).
    // This would require a much newer kernel with newer ebpf accessors.
    // (This is also blindly assuming 12 bytes of tcp timestamp option in the tcp header)
    uint64_t packets = 1;
    uint64_t bytes = skb->len;
    if (bytes > pmtu) {
        const int tcp_overhead = sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + 12;
        const int mss = pmtu - tcp_overhead;
        const uint64_t payload = bytes - tcp_overhead;
        packets = (payload + mss - 1) / mss;
        bytes = tcp_overhead * packets + payload;
    }
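
    // For example: a 4500 byte GRO aggregate arriving when pmtu is 1500 yields
    // tcp_overhead = 40 + 20 + 12 = 72, mss = 1428, payload = 4428,
    // packets = (4428 + 1427) / 1428 = 4 and bytes = 72 * 4 + 4428 = 4716,
    // ie. roughly what the four separate ~1500 byte packets would have cost
    // had the hardware not aggregated them.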

    // Are we past the limit? If so, then abort...
    // Note: this cannot overflow, since a u64 holds 936 years of traffic even at 5Gbps.
    // Do not drop here. Offload is just that: whenever we fail to handle
    // a packet we let the core stack deal with things.
    // (The core stack needs to handle limits correctly anyway,
    // since we don't offload all traffic in both directions)
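    // (The 936 year figure: 2^64 bytes / (5 Gbit/s = 6.25e8 bytes/s) is
    //  ~2.95e10 seconds, ie. ~936 years.)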
    if (stat_v->rxBytes + stat_v->txBytes + bytes > *limit_v) return TC_ACT_OK;

    if (!is_ethernet) {
        is_ethernet = true;
        l2_header_size = sizeof(struct ethhdr);
        // Try to inject an ethernet header, and simply return if we fail
        if (bpf_skb_change_head(skb, l2_header_size, /*flags*/ 0)) {
            __sync_fetch_and_add(&stat_v->rxErrors, 1);
            return TC_ACT_OK;
        }

        // bpf_skb_change_head() invalidates all pointers - reload them
        data = (void*)(long)skb->data;
        data_end = (void*)(long)skb->data_end;
        eth = data;
        ip6 = (void*)(eth + 1);

        // I do not believe this can ever happen, but keep the verifier happy...
        if (data + l2_header_size + sizeof(*ip6) > data_end) return TC_ACT_SHOT;
    }

    // CHECKSUM_COMPLETE is a 16-bit one's complement sum,
    // thus corrections for it need to be done in 16-bit chunks at even offsets.
    // IPv6 nexthdr is at offset 6, while hop limit is at offset 7
    uint8_t old_hl = ip6->hop_limit;
    --ip6->hop_limit;
    uint8_t new_hl = ip6->hop_limit;

    // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
    // (-ENOTSUPP) if it isn't.
    bpf_csum_update(skb, 0xFFFF - ntohs(old_hl) + ntohs(new_hl));
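
    // A sketch of the arithmetic above, assuming a little-endian host (where the
    // checksum is accumulated over native 16-bit loads, putting hop_limit - at odd
    // offset 7 - in the high byte): decrementing old_hl = 0x40 to new_hl = 0x3f
    // gives 0xFFFF - ntohs(0x40) + ntohs(0x3f) = 0xFFFF - 0x4000 + 0x3f00 = 0xFEFF,
    // ie. -0x100 in one's complement arithmetic. On big-endian hosts ntohs() is a
    // no-op and the correction works out to 0xFFFE, ie. -1, as expected there.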

    __sync_fetch_and_add(&stat_v->rxPackets, packets);
    __sync_fetch_and_add(&stat_v->rxBytes, bytes);

    // Overwrite any mac header with the new one
    *eth = v->macHeader;

    // Redirect to forwarded interface.
    //
    // Note that bpf_redirect() cannot fail unless you pass invalid flags.
    // The redirect actually happens after the ebpf program has already terminated,
    // and can fail, for example for mtu reasons, at that point in time, but there's
    // nothing we can do about it here.
    return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
}

SEC("schedcls/ingress/tether_ether")
int sched_cls_ingress_tether_ether(struct __sk_buff* skb) {
    return do_forward(skb, true);
}

// Note: section names must be unique to prevent programs from appending to each other;
// the bpf loader therefore strips everything past the final $ symbol when actually
// pinning the program into the filesystem.
//
// bpf_skb_change_head() is only present on 4.14+, and 2 trivial kernel patches are needed:
//   ANDROID: net: bpf: Allow TC programs to call BPF_FUNC_skb_change_head
//   ANDROID: net: bpf: permit redirect from ingress L3 to egress L2 devices at near max mtu
// (the first of those has already been upstreamed)
//
// 5.4 kernel support was only added to Android Common Kernel in R,
// and thus a 5.4 kernel always supports this.
//
// Hence, this mandatory (must load successfully) implementation for 5.4+ kernels:
DEFINE_BPF_PROG_KVER("schedcls/ingress/tether_rawip$5_4", AID_ROOT, AID_ROOT,
                     sched_cls_ingress_tether_rawip_5_4, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward(skb, false);
}

// and this identical optional (may fail to load) implementation for [4.14, 5.4) patched kernels:
DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/ingress/tether_rawip$4_14", AID_ROOT, AID_ROOT,
                                    sched_cls_ingress_tether_rawip_4_14, KVER(4, 14, 0),
                                    KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return do_forward(skb, false);
}

// and define a no-op stub for [4.9, 4.14) and unpatched [4.14, 5.4) kernels.
// (if the above real 4.14+ program loaded successfully, then bpfloader will have already pinned
// it at the same location this one would be pinned at and will thus skip loading this stub)
DEFINE_BPF_PROG_KVER_RANGE("schedcls/ingress/tether_rawip$stub", AID_ROOT, AID_ROOT,
                           sched_cls_ingress_tether_rawip_stub, KVER_NONE, KVER(5, 4, 0))
(struct __sk_buff* skb) {
    return TC_ACT_OK;
}

LICENSE("Apache 2.0");
CRITICAL("netd");