Switch to using type-safe kernel eBPF map accessors
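
This replaces the open-coded 'struct bpf_map_def SEC("maps")' definitions
with the DEFINE_BPF_MAP() macro, which also generates per-map accessor
wrappers that are checked against the map's declared key/value types at
compile time, instead of the void*-based bpf_map_lookup_elem(). A rough
sketch of the assumed expansion (the accessor names match the ones this
change starts calling; the exact macro body may differ):

    #define DEFINE_BPF_MAP(the_map, TYPE, KeyType, ValueType, num_entries)    \
        struct bpf_map_def SEC("maps") the_map = {                            \
                .type = BPF_MAP_TYPE_##TYPE,                                  \
                .key_size = sizeof(KeyType),                                  \
                .value_size = sizeof(ValueType),                              \
                .max_entries = (num_entries),                                 \
        };                                                                    \
                                                                              \
        static inline __always_inline ValueType* bpf_##the_map##_lookup_elem( \
                KeyType* k) {                                                 \
            return unsafe_bpf_map_lookup_elem(&the_map, k);                   \
        }

DEFINE_BPF_MAP_NO_ACCESSORS() presumably emits just the map definition, for
maps that are only touched through the untyped unsafe_* helpers (or not
touched from eBPF at all).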

Test: atest libbpf_android_test libnetdbpf_test netd_integration_test netd_unit_test netdutils_test resolv_integration_test resolv_unit_test
Bug: 130746652
Signed-off-by: Maciej Żenczykowski <maze@google.com>
Change-Id: I1a2e8fe1bdf0914a90902666895705b45fd1f959
diff --git a/bpf_progs/clatd.c b/bpf_progs/clatd.c
index 4c2953f..578d244 100644
--- a/bpf_progs/clatd.c
+++ b/bpf_progs/clatd.c
@@ -48,12 +48,7 @@
 #define ntohs(x) htons(x)
 #define ntohl(x) htonl(x)
 
-struct bpf_map_def SEC("maps") clat_ingress_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(struct ClatIngressKey),
-        .value_size = sizeof(struct ClatIngressValue),
-        .max_entries = 16,
-};
+DEFINE_BPF_MAP(clat_ingress_map, HASH, ClatIngressKey, ClatIngressValue, 16)
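+/* The macro above is also expected to generate typed wrappers, roughly:
+ *   ClatIngressValue* bpf_clat_ingress_map_lookup_elem(ClatIngressKey* k);
+ * (see the lookup below) */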
 
 static inline __always_inline int nat64(struct __sk_buff* skb, bool is_ethernet) {
     const int l2_header_size = is_ethernet ? sizeof(struct ethhdr) : 0;
@@ -92,7 +87,7 @@
             return TC_ACT_OK;
     }
 
-    struct ClatIngressKey k = {
+    ClatIngressKey k = {
             .iif = skb->ifindex,
             .pfx96.in6_u.u6_addr32 =
                     {
@@ -103,7 +98,7 @@
             .local6 = ip6->daddr,
     };
 
-    struct ClatIngressValue* v = bpf_map_lookup_elem(&clat_ingress_map, &k);
+    ClatIngressValue* v = bpf_clat_ingress_map_lookup_elem(&k);
 
     if (!v) return TC_ACT_OK;
 
diff --git a/bpf_progs/netd.c b/bpf_progs/netd.c
index e752dc3..65f3b47 100644
--- a/bpf_progs/netd.c
+++ b/bpf_progs/netd.c
@@ -45,7 +45,7 @@
 int xt_bpf_whitelist_prog(struct __sk_buff* skb) {
     uint32_t sock_uid = bpf_get_socket_uid(skb);
     if (is_system_uid(sock_uid)) return BPF_MATCH;
-    struct UidOwnerValue* whitelistMatch = bpf_map_lookup_elem(&uid_owner_map, &sock_uid);
+    UidOwnerValue* whitelistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
     if (whitelistMatch) return whitelistMatch->rule & HAPPY_BOX_MATCH;
     return BPF_NOMATCH;
 }
@@ -53,17 +53,12 @@
 SEC("skfilter/blacklist/xtbpf")
 int xt_bpf_blacklist_prog(struct __sk_buff* skb) {
     uint32_t sock_uid = bpf_get_socket_uid(skb);
-    struct UidOwnerValue* blacklistMatch = bpf_map_lookup_elem(&uid_owner_map, &sock_uid);
+    UidOwnerValue* blacklistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
     if (blacklistMatch) return blacklistMatch->rule & PENALTY_BOX_MATCH;
     return BPF_NOMATCH;
 }
 
-struct bpf_map_def SEC("maps") uid_permission_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(uint32_t),
-        .value_size = sizeof(uint8_t),
-        .max_entries = UID_OWNER_MAP_SIZE,
-};
+DEFINE_BPF_MAP(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)
 
 SEC("cgroupsock/inet/create")
 int inet_socket_create(struct bpf_sock* sk) {
@@ -75,7 +70,7 @@
      * run time. See UserHandle#isSameApp for detail.
      */
     uint32_t appId = (gid_uid & 0xffffffff) % PER_USER_RANGE;
-    uint8_t* permissions = bpf_map_lookup_elem(&uid_permission_map, &appId);
+    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
     if (!permissions) {
         // UID not in map. Default to just INTERNET permission.
         return 1;
diff --git a/bpf_progs/netd.h b/bpf_progs/netd.h
index 3bede8d..b98be57 100644
--- a/bpf_progs/netd.h
+++ b/bpf_progs/netd.h
@@ -31,28 +31,28 @@
 #include <stdint.h>
 #include "netdbpf/bpf_shared.h"
 
-struct uid_tag {
+typedef struct {
     uint32_t uid;
     uint32_t tag;
-};
+} uid_tag;
 
-struct stats_key {
+typedef struct {
     uint32_t uid;
     uint32_t tag;
     uint32_t counterSet;
     uint32_t ifaceIndex;
-};
+} stats_key;
 
-struct stats_value {
+typedef struct {
     uint64_t rxPackets;
     uint64_t rxBytes;
     uint64_t txPackets;
     uint64_t txBytes;
-};
+} stats_value;
 
-struct IfaceValue {
+typedef struct {
     char name[IFNAMSIZ];
-};
+} IfaceValue;
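+/* Note: converted from tagged structs to typedefs so the bare names can be
+ * passed directly as DEFINE_BPF_MAP() key/value type parameters below. */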
 
 // This is defined for cgroup bpf filter only.
 #define BPF_PASS 1
@@ -71,81 +71,38 @@
 #define TCP_FLAG_OFF 13
 #define RST_OFFSET 2
 
-struct bpf_map_def SEC("maps") cookie_tag_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(uint64_t),
-        .value_size = sizeof(struct uid_tag),
-        .max_entries = COOKIE_UID_MAP_SIZE,
-};
+DEFINE_BPF_MAP(cookie_tag_map, HASH, uint64_t, uid_tag, COOKIE_UID_MAP_SIZE)
+DEFINE_BPF_MAP(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
 
-struct bpf_map_def SEC("maps") uid_counterset_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(uint32_t),
-        .value_size = sizeof(uint8_t),
-        .max_entries = UID_COUNTERSET_MAP_SIZE,
-};
+/* These maps are updated via bpf_update_stats(), which unfortunately isn't type safe. */
+DEFINE_BPF_MAP_NO_ACCESSORS(app_uid_stats_map, HASH, uint32_t, stats_value, APP_STATS_MAP_SIZE)
+DEFINE_BPF_MAP_NO_ACCESSORS(stats_map_A, HASH, stats_key, stats_value, STATS_MAP_SIZE)
+DEFINE_BPF_MAP_NO_ACCESSORS(stats_map_B, HASH, stats_key, stats_value, STATS_MAP_SIZE)
+DEFINE_BPF_MAP_NO_ACCESSORS(iface_stats_map, HASH, uint32_t, stats_value, IFACE_STATS_MAP_SIZE)
 
-struct bpf_map_def SEC("maps") app_uid_stats_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(uint32_t),
-        .value_size = sizeof(struct stats_value),
-        .max_entries = APP_STATS_MAP_SIZE,
-};
+DEFINE_BPF_MAP(configuration_map, HASH, uint32_t, uint8_t, CONFIGURATION_MAP_SIZE)
+DEFINE_BPF_MAP(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
 
-struct bpf_map_def SEC("maps") stats_map_A = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(struct stats_key),
-        .value_size = sizeof(struct stats_value),
-        .max_entries = STATS_MAP_SIZE,
-};
-
-struct bpf_map_def SEC("maps") stats_map_B = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(struct stats_key),
-        .value_size = sizeof(struct stats_value),
-        .max_entries = STATS_MAP_SIZE,
-};
-
-struct bpf_map_def SEC("maps") iface_stats_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(uint32_t),
-        .value_size = sizeof(struct stats_value),
-        .max_entries = IFACE_STATS_MAP_SIZE,
-};
-
-struct bpf_map_def SEC("maps") configuration_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(uint32_t),
-        .value_size = sizeof(uint8_t),
-        .max_entries = CONFIGURATION_MAP_SIZE,
-};
-
-struct bpf_map_def SEC("maps") uid_owner_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(uint32_t),
-        .value_size = sizeof(struct UidOwnerValue),
-        .max_entries = UID_OWNER_MAP_SIZE,
-};
-
-struct bpf_map_def SEC("maps") iface_index_name_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(uint32_t),
-        .value_size = sizeof(struct IfaceValue),
-        .max_entries = IFACE_INDEX_NAME_MAP_SIZE,
-};
+/* Never actually accessed from eBPF, so no typed accessors are needed. */
+DEFINE_BPF_MAP_NO_ACCESSORS(iface_index_name_map, HASH, uint32_t, IfaceValue,
+                            IFACE_INDEX_NAME_MAP_SIZE)
 
 static __always_inline int is_system_uid(uint32_t uid) {
     return (uid <= MAX_SYSTEM_UID) && (uid >= MIN_SYSTEM_UID);
 }
 
+/* This function handles bpf maps from any key type to a 'stats_value' value,
+ * but it is up to the caller to ensure this, and that the type/length of the
+ * passed-in key actually matches the passed-in map.
+ */
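+/* Illustrative call with a uint32_t-keyed map (the key must match the map):
+ *   bpf_update_stats(skb, &app_uid_stats_map, direction, &uid);
+ */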
 static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, struct bpf_map_def* map,
                                                     int direction, void* key) {
-    struct stats_value* value;
-    value = bpf_map_lookup_elem(map, key);
+    stats_value* value;
+    value = unsafe_bpf_map_lookup_elem(map, key);
     if (!value) {
-        struct stats_value newValue = {};
-        bpf_map_update_elem(map, key, &newValue, BPF_NOEXIST);
-        value = bpf_map_lookup_elem(map, key);
+        stats_value newValue = {};
+        unsafe_bpf_map_update_elem(map, key, &newValue, BPF_NOEXIST);
+        value = unsafe_bpf_map_lookup_elem(map, key);
     }
     if (value) {
         if (direction == BPF_EGRESS) {
@@ -199,7 +156,7 @@
 
 static __always_inline BpfConfig getConfig(uint32_t configKey) {
     uint32_t mapSettingKey = configKey;
-    BpfConfig* config = bpf_map_lookup_elem(&configuration_map, &mapSettingKey);
+    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
     if (!config) {
         // Couldn't read configuration entry. Assume everything is disabled.
         return DEFAULT_CONFIG;
@@ -214,7 +171,7 @@
 
     BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);
 
-    struct UidOwnerValue* uidEntry = bpf_map_lookup_elem(&uid_owner_map, &uid);
+    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
     uint8_t uidRules = uidEntry ? uidEntry->rule : 0;
     uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;
 
@@ -257,7 +214,7 @@
     }
 
     uint64_t cookie = bpf_get_socket_cookie(skb);
-    struct uid_tag* utag = bpf_map_lookup_elem(&cookie_tag_map, &cookie);
+    uid_tag* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
     uint32_t uid, tag;
     if (utag) {
         uid = utag->uid;
@@ -267,13 +224,13 @@
         tag = 0;
     }
 
-    struct stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};
+    stats_key key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};
 
-    uint8_t* counterSet = bpf_map_lookup_elem(&uid_counterset_map, &uid);
+    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
     if (counterSet) key.counterSet = (uint32_t)*counterSet;
 
     uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
-    uint8_t* selectedMap = bpf_map_lookup_elem(&configuration_map, &mapSettingKey);
+    uint8_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);
     if (!selectedMap) {
         return match;
     }