// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Anton Protopopov
//
// Based on tcpconnect(8) from BCC by Brendan Gregg
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "maps.bpf.h"
#include "tcpconnect.h"
SEC(".rodata") int filter_ports[MAX_PORTS];
const volatile int filter_ports_len = 0;
const volatile uid_t filter_uid = -1;
const volatile pid_t filter_pid = 0;
const volatile bool do_count = 0;

/* Define here, because there are conflicts with include files */
#define AF_INET		2
#define AF_INET6	10
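
/*
 * Socket pointer saved at connect() entry, keyed by thread id, so that
 * the return probe can look up the socket it belongs to.
 */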
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, struct sock *);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} sockets SEC(".maps");
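
/* Per-flow connect counters for IPv4, used when do_count is set. */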
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct ipv4_flow_key);
	__type(value, u64);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} ipv4_count SEC(".maps");
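
/* Per-flow connect counters for IPv6, used when do_count is set. */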
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct ipv6_flow_key);
	__type(value, u64);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} ipv6_count SEC(".maps");
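
/* Perf buffer used to stream one event per traced connect to userspace. */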
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");
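
/*
 * Return true if a port filter is configured and dport is not one of the
 * requested ports, i.e. this connect should be skipped.
 */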
static __always_inline bool filter_port(__u16 port)
{
	int i;

	if (filter_ports_len == 0)
		return false;

	for (i = 0; i < filter_ports_len; i++) {
		if (port == filter_ports[i])
			return false;
	}
	return true;
}
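
/*
 * Common entry handler for the tcp_v4_connect/tcp_v6_connect kprobes:
 * apply the PID and UID filters, then stash the socket for the return probe.
 */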
static __always_inline int
enter_tcp_connect(struct pt_regs *ctx, struct sock *sk)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = pid_tgid;
	__u32 uid;

	if (filter_pid && pid != filter_pid)
		return 0;

	uid = bpf_get_current_uid_gid();
	if (filter_uid != (uid_t) -1 && uid != filter_uid)
		return 0;

	bpf_map_update_elem(&sockets, &tid, &sk, 0);
	return 0;
}
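
/* Bump the IPv4 counter for this (saddr, daddr, dport) flow. */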
static __always_inline void count_v4(struct sock *sk, __u16 dport)
{
	struct ipv4_flow_key key = {};
	static __u64 zero;
	__u64 *val;

	BPF_CORE_READ_INTO(&key.saddr, sk, __sk_common.skc_rcv_saddr);
	BPF_CORE_READ_INTO(&key.daddr, sk, __sk_common.skc_daddr);
	key.dport = dport;
	val = bpf_map_lookup_or_try_init(&ipv4_count, &key, &zero);
	if (val)
		__atomic_add_fetch(val, 1, __ATOMIC_RELAXED);
}
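
/* Bump the IPv6 counter for this (saddr, daddr, dport) flow. */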
static __always_inline void count_v6(struct sock *sk, __u16 dport)
{
	struct ipv6_flow_key key = {};
	static const __u64 zero;
	__u64 *val;

	BPF_CORE_READ_INTO(&key.saddr, sk,
			   __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
	BPF_CORE_READ_INTO(&key.daddr, sk,
			   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
	key.dport = dport;
	val = bpf_map_lookup_or_try_init(&ipv6_count, &key, &zero);
	if (val)
		__atomic_add_fetch(val, 1, __ATOMIC_RELAXED);
}
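
/* Emit one event for a successful IPv4 connect through the perf buffer. */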
static __always_inline void
trace_v4(struct pt_regs *ctx, pid_t pid, struct sock *sk, __u16 dport)
{
	struct event event = {};

	event.af = AF_INET;
	event.pid = pid;
	event.uid = bpf_get_current_uid_gid();
	event.ts_us = bpf_ktime_get_ns() / 1000;
	BPF_CORE_READ_INTO(&event.saddr_v4, sk, __sk_common.skc_rcv_saddr);
	BPF_CORE_READ_INTO(&event.daddr_v4, sk, __sk_common.skc_daddr);
	event.dport = dport;
	bpf_get_current_comm(event.task, sizeof(event.task));
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));
}
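
/* Emit one event for a successful IPv6 connect through the perf buffer. */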
static __always_inline void
trace_v6(struct pt_regs *ctx, pid_t pid, struct sock *sk, __u16 dport)
{
	struct event event = {};

	event.af = AF_INET6;
	event.pid = pid;
	event.uid = bpf_get_current_uid_gid();
	event.ts_us = bpf_ktime_get_ns() / 1000;
	BPF_CORE_READ_INTO(&event.saddr_v6, sk,
			   __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
	BPF_CORE_READ_INTO(&event.daddr_v6, sk,
			   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
	event.dport = dport;
	bpf_get_current_comm(event.task, sizeof(event.task));
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));
}
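
/*
 * Common return handler: look up the socket saved on entry, skip failed
 * connects and filtered ports, then either count the flow or emit an event.
 */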
static __always_inline int
exit_tcp_connect(struct pt_regs *ctx, int ret, int ip_ver)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = pid_tgid;
	struct sock **skpp;
	struct sock *sk;
	__u16 dport;

	skpp = bpf_map_lookup_elem(&sockets, &tid);
	if (!skpp)
		return 0;

	if (ret)
		goto end;

	sk = *skpp;

	BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport);
	if (filter_port(dport))
		goto end;

	if (do_count) {
		if (ip_ver == 4)
			count_v4(sk, dport);
		else
			count_v6(sk, dport);
	} else {
		if (ip_ver == 4)
			trace_v4(ctx, pid, sk, dport);
		else
			trace_v6(ctx, pid, sk, dport);
	}

end:
	bpf_map_delete_elem(&sockets, &tid);
	return 0;
}
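
/* Attach to the entry and return of the kernel's IPv4 and IPv6 connect paths. */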
SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(tcp_v4_connect, struct sock *sk)
{
	return enter_tcp_connect(ctx, sk);
}
SEC("kretprobe/tcp_v4_connect")
int BPF_KRETPROBE(tcp_v4_connect_ret, int ret)
{
	return exit_tcp_connect(ctx, ret, 4);
}
SEC("kprobe/tcp_v6_connect")
int BPF_KPROBE(tcp_v6_connect, struct sock *sk)
{
	return enter_tcp_connect(ctx, sk);
}
SEC("kretprobe/tcp_v6_connect")
int BPF_KRETPROBE(tcp_v6_connect_ret, int ret)
{
	return exit_tcp_connect(ctx, ret, 6);
}
char LICENSE[] SEC("license") = "GPL";