Fix cgroup bpf program direction

The bpfloader attached the cgroup bpf programs in the wrong direction
(ingress and egress were swapped), which caused the per uid/tag stats to
be recorded under the wrong direction. This patch fixes the program
loading problem and also refactors the kernel program so that all stats
recording functions share the same implementation. It also removes bpf
helper code that is no longer used.

Test: the per uid stats sum up is consistent with per iface stats
Bug: 30950746
Change-Id: I92c0511855f224ef98c7d75a01c99c432796d3bf
diff --git a/bpfloader/BpfLoader.cpp b/bpfloader/BpfLoader.cpp
index 9d75077..26fb99d 100644
--- a/bpfloader/BpfLoader.cpp
+++ b/bpfloader/BpfLoader.cpp
@@ -199,9 +199,9 @@
     }
 
     unique_fd fd;
-    if (type == BPF_CGROUP_INET_EGRESS) {
+    if (type == BPF_CGROUP_INET_INGRESS) {
         fd.reset(loadProg(INGRESS_PROG, mapPatterns));
-    } else if (type == BPF_CGROUP_INET_INGRESS) {
+    } else if (type == BPF_CGROUP_INET_EGRESS) {
         fd.reset(loadProg(EGRESS_PROG, mapPatterns));
     } else if (!strcmp(name, "xt_bpf_ingress_prog")) {
         fd.reset(loadProg(XT_BPF_INGRESS_PROG, mapPatterns));
diff --git a/bpfloader/bpf_kern.h b/bpfloader/bpf_kern.h
index 83ad5b1..8fb6046 100644
--- a/bpfloader/bpf_kern.h
+++ b/bpfloader/bpf_kern.h
@@ -64,46 +64,24 @@
 #define TCP_FLAG_OFF 13
 #define RST_OFFSET 2
 
-static __always_inline int xt_bpf_count(struct __sk_buff* skb, int type) {
-    uint32_t key = skb->ifindex;
+static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, uint64_t map,
+                                                    int direction, void *key) {
     struct stats_value* value;
-
-    value = find_map_entry(IFACE_STATS_MAP, &key);
+    value = find_map_entry(map, key);
     if (!value) {
         struct stats_value newValue = {};
-        write_to_map_entry(IFACE_STATS_MAP, &key, &newValue, BPF_NOEXIST);
-        value = find_map_entry(IFACE_STATS_MAP, &key);
+        write_to_map_entry(map, key, &newValue, BPF_NOEXIST);
+        value = find_map_entry(map, key);
     }
     if (value) {
-        if (type == BPF_EGRESS) {
+        if (direction == BPF_EGRESS) {
             __sync_fetch_and_add(&value->txPackets, 1);
             __sync_fetch_and_add(&value->txBytes, skb->len);
-        } else if (type == BPF_INGRESS) {
+        } else if (direction == BPF_INGRESS) {
             __sync_fetch_and_add(&value->rxPackets, 1);
             __sync_fetch_and_add(&value->rxBytes, skb->len);
         }
     }
-    return BPF_PASS;
-}
-
-static __always_inline inline void bpf_update_stats(struct __sk_buff* skb, uint64_t map,
-                                                    int direction, struct stats_key key) {
-    struct stats_value* value;
-    value = find_map_entry(map, &key);
-    if (!value) {
-        struct stats_value newValue = {};
-        write_to_map_entry(map, &key, &newValue, BPF_NOEXIST);
-        value = find_map_entry(map, &key);
-    }
-    if (value) {
-      if (direction == BPF_INGRESS) {
-        __sync_fetch_and_add(&value->rxPackets, 1);
-        __sync_fetch_and_add(&value->rxBytes, skb->len);
-      } else {
-        __sync_fetch_and_add(&value->txPackets, 1);
-        __sync_fetch_and_add(&value->txBytes, skb->len);
-      }
-    }
 }
 
 static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid) {
@@ -191,10 +169,10 @@
 
     int ret;
     if (tag) {
-        bpf_update_stats(skb, TAG_STATS_MAP, direction, key);
+        bpf_update_stats(skb, TAG_STATS_MAP, direction, &key);
     }
 
     key.tag = 0;
-    bpf_update_stats(skb, UID_STATS_MAP, direction, key);
+    bpf_update_stats(skb, UID_STATS_MAP, direction, &key);
     return bpf_owner_match(skb, uid);
 }
diff --git a/bpfloader/cgroup_bpf_egress_prog.o b/bpfloader/cgroup_bpf_egress_prog.o
index 4575693..f68fd02 100644
--- a/bpfloader/cgroup_bpf_egress_prog.o
+++ b/bpfloader/cgroup_bpf_egress_prog.o
Binary files differ
diff --git a/bpfloader/cgroup_bpf_ingress_prog.o b/bpfloader/cgroup_bpf_ingress_prog.o
index b989bd3..18cfbbc 100644
--- a/bpfloader/cgroup_bpf_ingress_prog.o
+++ b/bpfloader/cgroup_bpf_ingress_prog.o
Binary files differ
diff --git a/bpfloader/xt_bpf_egress_prog.c b/bpfloader/xt_bpf_egress_prog.c
index ae4dc31..3f4bace 100644
--- a/bpfloader/xt_bpf_egress_prog.c
+++ b/bpfloader/xt_bpf_egress_prog.c
@@ -19,5 +19,7 @@
 
 ELF_SEC(BPF_PROG_SEC_NAME)
 int xt_bpf_egress_prog(struct __sk_buff* skb) {
-    return xt_bpf_count(skb, BPF_EGRESS);
+    uint32_t key = skb->ifindex;
+    bpf_update_stats(skb, IFACE_STATS_MAP, BPF_EGRESS, &key);
+    return BPF_PASS;
 }
diff --git a/bpfloader/xt_bpf_egress_prog.o b/bpfloader/xt_bpf_egress_prog.o
index 59ff564..739b3da 100644
--- a/bpfloader/xt_bpf_egress_prog.o
+++ b/bpfloader/xt_bpf_egress_prog.o
Binary files differ
diff --git a/bpfloader/xt_bpf_ingress_prog.c b/bpfloader/xt_bpf_ingress_prog.c
index dc823c3..5f10255 100644
--- a/bpfloader/xt_bpf_ingress_prog.c
+++ b/bpfloader/xt_bpf_ingress_prog.c
@@ -19,5 +19,7 @@
 
 ELF_SEC(BPF_PROG_SEC_NAME)
 int xt_bpf_ingress_prog(struct __sk_buff* skb) {
-    return xt_bpf_count(skb, BPF_INGRESS);
+    uint32_t key = skb->ifindex;
+    bpf_update_stats(skb, IFACE_STATS_MAP, BPF_INGRESS, &key);
+    return BPF_PASS;
 }
diff --git a/bpfloader/xt_bpf_ingress_prog.o b/bpfloader/xt_bpf_ingress_prog.o
index 8c29a32..20e71bc 100644
--- a/bpfloader/xt_bpf_ingress_prog.o
+++ b/bpfloader/xt_bpf_ingress_prog.o
Binary files differ