diff --git a/observation/src/tcpconnect/tcpconnect.bpf.c b/observation/src/tcpconnect/tcpconnect.bpf.c
index ba7c3a237032990156d580e8b89adab9aaad6388..2bab2385bb74e43d58599edd4d6498bf81797946 100644
--- a/observation/src/tcpconnect/tcpconnect.bpf.c
+++ b/observation/src/tcpconnect/tcpconnect.bpf.c
@@ -41,3 +41,103 @@ struct {
 	__type(value, u64);
 } ipv6_count SEC(".maps");
 
+/* Return true when @port should be filtered out, i.e. port filtering is
+ * active (filter_ports_len > 0) and @port matches none of the ports in
+ * the filter_ports array. */
+static __always_inline bool filter_port(__u16 port)
+{
+	if (filter_ports_len == 0)
+		return false;
+
+	for (int i = 0; i < filter_ports_len && i < MAX_PORTS; i++) {
+		if (port == filter_ports[i])
+			return false;
+	}
+
+	return true;
+}
+
+/* Bump the per-flow counter for an IPv4 connection in the ipv4_count map. */
+static __always_inline void
+count_v4(struct sock *sk, __u16 sport, __u16 dport)
+{
+	struct ipv4_flow_key key = {};
+	static const __u64 zero;
+	__u64 *val;
+
+	BPF_CORE_READ_INTO(&key.saddr, sk, __sk_common.skc_rcv_saddr);
+	BPF_CORE_READ_INTO(&key.daddr, sk, __sk_common.skc_daddr);
+	key.sport = sport;
+	key.dport = dport;
+	val = bpf_map_lookup_or_try_init(&ipv4_count, &key, &zero);
+	if (!val)
+		return;
+	__atomic_add_fetch(val, 1, __ATOMIC_RELAXED);
+}
+
+/* Bump the per-flow counter for an IPv6 connection in the ipv6_count map. */
+static __always_inline void
+count_v6(struct sock *sk, __u16 sport, __u16 dport)
+{
+	struct ipv6_flow_key key = {};
+	static const __u64 zero;
+	__u64 *val;
+
+	BPF_CORE_READ_INTO(&key.saddr, sk,
+			   __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
+	BPF_CORE_READ_INTO(&key.daddr, sk,
+			   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
+	key.sport = sport;
+	key.dport = dport;
+
+	val = bpf_map_lookup_or_try_init(&ipv6_count, &key, &zero);
+	if (!val)
+		return;
+	__atomic_add_fetch(val, 1, __ATOMIC_RELAXED);
+}
+
+/* Emit a per-connection IPv4 event to user space via the event buffer. */
+static __always_inline void
+trace_v4(void *ctx, pid_t pid, struct sock *sk, __u16 sport, __u16 dport)
+{
+	struct event *event;
+
+	event = reserve_buf(sizeof(*event));
+	if (!event)
+		return;
+
+	event->af = AF_INET;
+	event->pid = pid;
+	event->uid = bpf_get_current_uid_gid();
+	BPF_CORE_READ_INTO(&event->saddr_v4, sk, __sk_common.skc_rcv_saddr);
+	BPF_CORE_READ_INTO(&event->daddr_v4, sk, __sk_common.skc_daddr);
+	event->sport = sport;
+	event->dport = dport;
+	bpf_get_current_comm(&event->task, sizeof(event->task));
+
+	submit_buf(ctx, event, sizeof(*event));
+}
+
+/* Emit a per-connection IPv6 event to user space via the event buffer. */
+static __always_inline void
+trace_v6(void *ctx, pid_t pid, struct sock *sk, __u16 sport, __u16 dport)
+{
+	struct event *event;
+
+	event = reserve_buf(sizeof(*event));
+	if (!event)
+		return;
+
+	event->af = AF_INET6;
+	event->pid = pid;
+	event->uid = bpf_get_current_uid_gid();
+	BPF_CORE_READ_INTO(&event->saddr_v6, sk,
+			   __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
+	BPF_CORE_READ_INTO(&event->daddr_v6, sk,
+			   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
+	event->sport = sport;
+	event->dport = dport;
+	bpf_get_current_comm(&event->task, sizeof(event->task));
+
+	submit_buf(ctx, event, sizeof(*event));
+}