diff --git a/source/tools/detect/net_diag/rtrace/.gitignore b/source/tools/detect/net_diag/rtrace/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..0969291a0726cdb9cc543b3eb9dc2eb167d018ff --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/.gitignore @@ -0,0 +1,4 @@ +target +Cargo.lock +.vscode +*.toml diff --git a/source/tools/detect/net_diag/rtrace/Makefile b/source/tools/detect/net_diag/rtrace/Makefile index 47f0faa0ce8e3f57ca33147b2d1e23662d7056a8..6445e616ef742303232ed7206a14bc6166c9ccce 100644 --- a/source/tools/detect/net_diag/rtrace/Makefile +++ b/source/tools/detect/net_diag/rtrace/Makefile @@ -1,5 +1,44 @@ -target := rtrace +ifndef KERNEL_VERSION +KERNEL_VERSION = $(shell uname -r) +SRC := /work/gitee/sysak/source +OBJPATH = /work/gitee/sysak/out +OBJ_LIB_PATH := $(OBJPATH)/.sysak_compoents/lib/$(KERNEL_VERSION) +OBJ_TOOLS_ROOT := $(OBJPATH)/.sysak_compoents/tools +OBJ_TOOLS_PATH := $(OBJPATH)/.sysak_compoents/tools/$(KERNEL_VERSION) +SYSAK_RULES := .sysak.rules + +export SRC +export OBJPATH +export OBJ_LIB_PATH +export OBJ_TOOLS_ROOT +export OBJ_TOOLS_PATH +endif + +TARGET_PATH := $(OBJ_TOOLS_ROOT) + +.PHONY: rtrace + +rtrace: lib bin +lib: + make -C ebpf -include $(SRC)/mk/bin.mk +bin: delay drop +rs: + cd rtrace-rs && cargo build --release + +parser: + cd rtrace-parser && cargo build --release + +delay: + cd rtrace-delay && cargo build --release + cp rtrace-delay/target/release/rtrace-delay $(TARGET_PATH)/ + @echo "rtrace-delay" >> $(TARGET_PATH)/$(SYSAK_RULES) + +drop: + cd rtrace-drop && cargo build --release + cp rtrace-drop/target/release/rtrace-drop $(TARGET_PATH)/ + @echo "rtrace-drop" >> $(TARGET_PATH)/$(SYSAK_RULES) + +target := rtrace diff --git a/source/tools/detect/net_diag/rtrace/README.md b/source/tools/detect/net_diag/rtrace/README.md new file mode 100644 index 0000000000000000000000000000000000000000..30d07cdb7f1c4500c73ff11e8ac8b7c24208cd75 --- /dev/null +++ 
b/source/tools/detect/net_diag/rtrace/README.md @@ -0,0 +1,19 @@ + + +# rtrace + +rtrace is an eBPF-based tool focused on network diagnostics. It contains three libraries and two diagnostic tools. + +Three libraries: + +* ebpf +* rtrace-rs +* rtrace-parser + +Two diagnostic tools: + +* rtrace-delay +* rtrace-drop + + + diff --git a/source/tools/detect/net_diag/rtrace/ebpf/Makefile b/source/tools/detect/net_diag/rtrace/ebpf/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..bf480a0bafa228d867ddb74ff7230fb186fabe9d --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/Makefile @@ -0,0 +1,68 @@ +CLANG ?= clang +LLVM_STRIP ?= llvm-strip +BPFTOOL ?= $(SRC)/lib/internal/ebpf/tools/bpftool +ARCH := $(shell uname -m | sed 's/x86_64/x86/') +LIBBPF_OBJ := $(OBJ_LIB_PATH)/libbpf.a + + +CFLAGS = -g -O2 -Wall -fPIC + +INCLUDES = -I$(OBJPATH) -I$(SRC)/lib/internal/ebpf -I$(SRC)/lib/internal/ebpf/libbpf/include -I$(SRC)/lib/internal/ebpf/libbpf/include/uapi -I$(OBJ_LIB_PATH) -I. 
+ + newdirs := $(shell find ./ -type d) +bpfsrcs := rtrace.bpf.c +csrcs := rtrace.c utils/btf.c utils/disasm.c utils/insn.c utils/object.c + +newdirs := $(addprefix $(OBJPATH)/, $(newdirs)) +cobjs := $(patsubst %.c, %.o, $(csrcs)) +target_cobjs := $(foreach n, $(cobjs), $(OBJPATH)/$(n)) + +bpfobjs := $(patsubst %.c, %.o, $(bpfsrcs)) +target_bpfobjs := $(foreach n, $(bpfobjs), $(OBJPATH)/$(n)) + +bpfskel := $(patsubst %.bpf.o, %.skel.h, $(target_bpfobjs)) + +ifeq ($(V),1) + Q = + msg = +else + Q = @ + msg = @printf ' %-8s %s%s\n' \ + "$(1)" \ + "$(patsubst $(abspath $(OUTPUT))/%,%,$(2))" \ + "$(if $(3), $(3))"; + MAKEFLAGS += --no-print-directory +endif + + +librtrace: $(OBJ_LIB_PATH)/librtrace.a + +$(OBJ_LIB_PATH)/librtrace.a: $(target_cobjs) + $(Q) ar -rcs $@ $^ + +$(target_cobjs): $(cobjs) + +$(cobjs): %.o : %.c $(bpfskel) + $(call msg,CC,$@) + $(Q)$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $(OBJPATH)/$@ + +$(bpfskel): %.skel.h : %.bpf.o $(target_bpfobjs) + $(call msg,GEN-SKEL,$@) + $(Q)$(BPFTOOL) gen skeleton $< > $@ + +$(target_bpfobjs): $(bpfobjs) + +$(bpfobjs) : %.o : %.c dirs + $(call msg,BPF,$@) + $(Q)$(CLANG) -g -O2 -target bpf -D__TARGET_ARCH_$(ARCH) $(INCLUDES) -c $< -o $(OBJPATH)/$@ + $(Q)$(LLVM_STRIP) -g $(OBJPATH)/$@ # strip useless DWARF info + +dirs: + mkdir -p $(newdirs) + +# delete failed targets +.DELETE_ON_ERROR: + +# keep intermediate (.skel.h, .bpf.o, etc) targets +# .SECONDARY: diff --git a/source/tools/detect/net_diag/rtrace/ebpf/README.md new file mode 100644 index 0000000000000000000000000000000000000000..13dca20b301ebc3fab767e6163962539a0c3ef3d --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/README.md @@ -0,0 +1,5 @@ + +## rtrace eBPF program + +This directory contains the eBPF program used by the rtrace tool; it is provided as a library so that third-party programs can conveniently build on it for secondary development. 
+ diff --git a/source/tools/detect/net_diag/rtrace/ebpf/common.bpf.h b/source/tools/detect/net_diag/rtrace/ebpf/common.bpf.h new file mode 100644 index 0000000000000000000000000000000000000000..a8d8a4b58dc570e72f794fa9dad0ef31e448d5e6 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/common.bpf.h @@ -0,0 +1,129 @@ +#ifndef _RTRACE_COMMON_BPF_H +#define _RTRACE_COMMON_BPF_H + +#include "common.def.h" + +struct +{ + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __type(key, int); + __type(value, int); +} perf SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 8); + __type(key, uint32_t); + __type(value, uint32_t); +} jmp_table SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 1); + __type(key, uint32_t); + __type(value, struct buffer); +} buffer_map SEC(".maps"); + +static __always_inline void set_pid_info(struct pid_info *pi) +{ + uint64_t pid_tgid = bpf_get_current_pid_tgid(); + uint32_t pid = pid_tgid >> 32; + + pi->pid = pid; + bpf_get_current_comm(pi->comm, TASK_COMM_LEN); +} + +union kernfs_node_id___419 +{ + struct + { + u32 ino; + u32 generation; + }; + u64 id; +}; + +struct kernfs_node___419 +{ + struct kernfs_node___419 *parent; + union kernfs_node_id___419 id; +}; + +static __always_inline void read_cgroup_id(uint64_t *target_id) +{ + struct task_struct *curr_task = (struct task_struct *)bpf_get_current_task(); + struct kernfs_node *kn; + BPF_CORE_READ_INTO(&kn, curr_task, cgroups, subsys[0], cgroup, kn); + + if (!kn) + return; + struct kernfs_node___419 *kn_old = (void *)kn; + if (bpf_core_field_exists(kn_old->id)) + BPF_CORE_READ_INTO(target_id, kn_old, id.id); + else + BPF_CORE_READ_INTO(target_id, kn, id); +} + +// for centos 3.10 kernel +struct net___310 +{ + unsigned int proc_inum; +}; + +struct net_device___310 +{ + struct net___310 *nd_net; + int ifindex; +}; + +static __always_inline void read_ns_inum(struct sk_buff *skb, u32 *inum) +{ + struct net *net; + 
if (bpf_core_field_exists(net->ns.inum)) + { + struct net_device *dev; + bpf_core_read(&dev, sizeof(dev), &skb->dev); + bpf_core_read(&net, sizeof(net), &dev->nd_net.net); + bpf_core_read(inum, sizeof(*inum), &net->ns.inum); + } + else + { + struct net___310 *net310; + struct net_device___310 *dev310; + bpf_core_read(&dev310, sizeof(dev310), &skb->dev); + bpf_core_read(&net310, sizeof(net310), &dev310->nd_net); + bpf_core_read(inum, sizeof(*inum), &net310->proc_inum); + } +} + +static __always_inline void read_ns_inum_by_sk(struct sock *sk, u32 *inum) +{ + struct net *net; + if (bpf_core_field_exists(net->ns.inum)) + BPF_CORE_READ_INTO(inum, sk, __sk_common.skc_net.net, ns.inum); + else + { + struct net___310 *net310; + BPF_CORE_READ_INTO(&net310, sk, __sk_common.skc_net.net); + BPF_CORE_READ_INTO(inum, net310, proc_inum); + } +} + +// Declare a map with key type uint64 according to enum type. +#define DECLARE_HASH_MAP(enum_type, entries) \ + struct \ + { \ + __uint(type, BPF_MAP_TYPE_HASH); \ + __uint(max_entries, entries); \ + __type(key, u64); \ + __type(value, ENUM_TO_STRUCT(enum_type)); \ + } ENUM_TO_MAP_NAME(enum_type) SEC(".maps"); + +DECLARE_HASH_MAP(BASIC_INFO, MAX_ENTRIES) + +#define UPDATE_HASH_MAP(enum_type, key, value) \ + bpf_map_update_elem(ENUM_TO_REF_MAP(enum_type), key, value, BPF_ANY); + +#endif diff --git a/source/tools/detect/net_diag/rtrace/ebpf/common.def.h b/source/tools/detect/net_diag/rtrace/ebpf/common.def.h new file mode 100644 index 0000000000000000000000000000000000000000..a339df6c612fae33667f1c46ff75ccb79aebb85e --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/common.def.h @@ -0,0 +1,201 @@ +#ifndef _RTRACE_COMMON_BPF_H_ +#define _RTRACE_COMMON_BPF_H_ + +#define TASK_COMM_LEN 16 +#define FUNCNAME_MAX_LEN 32 +#define MAC_HEADER_SIZE 14 +#define FILTER_RULES_MAX_NUM 10 + +#define USR_DEUBG + +#if defined(__VMLINUX_H__) && defined(BPF_DEBUG) +#define output(...) 
__bpf_printk(__VA_ARGS__); +#elif !defined(__VMLINUX_H__) && defined(USR_DEUBG) +#define output(...) printf(__VA_ARGS__); +#else +#define output(...) +#endif + +#define pr_err(fmt, ...) \ + do \ + { \ + output("ERROR: " fmt, ##__VA_ARGS__); \ + } while (0) + +#define pr_dbg(fmt, ...) \ + do \ + { \ + output("DEBUG: " fmt, ##__VA_ARGS__); \ + } while (0) + +#define TO_STR(a) #a +#define TO_STRING(a) TO_STR(a) + +#define FILTER_MAP_DEFAULT_KEY 0 +#define FLOW_MAP_DEFAULT_VAL ((__u64)(0x0123456776543210)) +typedef uint64_t rtrace_mask_t; +#define MAX_NUM_BYTES (0xff + 1) +#define MAX_STACK 5 +#define MAX_ENTRIES 10240 +#define RTRACE_LSHIFT(nbits) ((rtrace_mask_t)1 << (nbits)) +#define RTRACE_MASK(nbits) (RTRACE_LSHIFT(nbits) - 1) +#define RTRACE_ALIGN(n) n +#define TEST_NBITS_SET(num, nbits) ((num)&RTRACE_LSHIFT(nbits)) +// #define ADDREF(a) &##a ERROR +#define ADDREF(a) &a +#define ENUM_TO_MAP_NAME(enum_type) enum_type##_map +#define ENUM_TO_REF_MAP(enum_type) ADDREF(ENUM_TO_MAP_NAME(enum_type)) +#define ENUM_TO_STRUCT(enum_type) \ + struct enum_type##_struct + +#define ENUM_TO_FUNC_NAME(prefix, enum_type) prefix_##enum_type + +#define TYPE_TO_ENUM(type) (((type) << 16) >> 16) +#define TYPE_TO_CPU(type) ((type) >> 16) + +#define TYPE_SET_CPU(type, cpu) ((type) + ((cpu) << 16)) + +#define IPHDR_VALID(iphdr) ((iphdr)->saddr != 0) +#define TCPHDR_VALID(cd) ((cd)->transport_header != (u16)~0) + +// 0 - 6 bit 6 - 12 bit 31 - 32 bit +#define SET_MAJOR_TYPE(num, val) (((num) & (~(0x3f))) | ((val) & (0x3f))) +#define SET_MINOR_TYPE(num, val) (((num) & (~(0x3f << 6))) | (((val) & (0x3f)) << 6)) +#define SET_SEND_RECV(num, val) ((num) & (~(1u << 31)) | (((val)&0x1)) << 31) +#define GET_MAJOR_TYPE(num) ((num) & (0x3f)) +#define GET_MINOR_TYPE(num) (((num) >> 6) & (0x3f)) +#define GET_SEND_RECV(num) (((num) >> 31) & 0x1) + +#ifndef __CONCAT //for bpf program. 
+#define __CONCAT(a, b) a##b +#endif +#define CONCATENATE(a, b) __CONCAT(a, b) + +#define PLACEHOLDER_NUM1 0x888 +#define PLACEHOLDER_NUM(no) CONCATENATE(PLACEHOLDER_NUM, no) +#define INSTERT_PLACEHOLDER(type, no) \ + type placeholder_##no; \ + asm volatile("%0 = %1" \ + : "=r"(placeholder_##no) \ + : "i"(PLACEHOLDER_NUM(no))); + +#define LOOKUP_PLACEHOLDER(no) placeholder_##no +#define CONTAINER_ID_LEN 128 + +#define KPROBE_NAME(func) kprobe__##func +#define ZERO_OR_EQUAL(a, b) ((a) == 0 || (a) == (b)) + +enum +{ + BASIC_INFO = 0, + CGROUP, + STACK, + KRETPROBE, // Get the return parameter of the function + LINEPROBE, + ENUM_END, +}; + +#define MAX_BUFFER_SIZE 512 +#define BUFFER_START_OFFSET 8 +struct buffer +{ + uint64_t offset; + uint8_t buffer[MAX_BUFFER_SIZE]; +}; + +struct cache_data +{ + void *ctx; + struct sock *sk; + struct sk_buff *skb; + struct buffer *buffer; + char *head; + // char *data; +#if defined(__VMLINUX_H__) + struct iphdr ih; + struct tcphdr th; + struct tcp_skb_cb tsc; +#else + int ih[5]; + int th[5]; + int tsc[12]; +#endif + uint16_t transport_header; + uint16_t network_header; + uint8_t send; + uint32_t sk_protocol; +}; + +struct addr_pair +{ + uint32_t saddr; + uint32_t daddr; + uint16_t sport; + uint16_t dport; +}; + +struct pid_info +{ + uint32_t pid; + char comm[TASK_COMM_LEN]; +}; + +// The addition of s is to avoid duplication with stack_info of vmlinux.h. 
+ENUM_TO_STRUCT(STACK) +{ + uint64_t kern_stack[MAX_STACK]; +}; + +struct filter_meta { + int pid; + struct addr_pair ap; +}; + +struct filter_map_key +{ + struct filter_meta fm[FILTER_RULES_MAX_NUM]; + uint32_t protocol; + int cnt; +}; + +struct tid_map_key +{ + uint32_t tid; + uint32_t bp; +}; + +#define CONSTRUCT_BPF_PROGRAM_NAME(sk_pos, skb_pos) \ + kprobe_sk_##sk_pos##skb_##skb_pos + +ENUM_TO_STRUCT(BASIC_INFO) +{ + uint64_t mask; + uint64_t ip; + uint64_t ts; + uint32_t seq; + uint32_t end_seq; + uint32_t rseq; + uint32_t rend_seq; + struct addr_pair ap; + struct pid_info pi; + uint64_t ret; +}; + +ENUM_TO_STRUCT(CGROUP) +{ + uint32_t inum; + uint64_t cgroupid; +}; + +#define DECLARE_AND_INIT_STRUCT(enum_type, name) \ + ENUM_TO_STRUCT(enum_type) \ + name = {0} + +#define DECLARE_STRUCT_PTR(enum_type, name) \ + ENUM_TO_STRUCT(enum_type) * name + +#define DECLARE_STRUCT(enum_type, name) \ + ENUM_TO_STRUCT(enum_type) \ + name + +#endif diff --git a/source/tools/detect/net_diag/rtrace/ebpf/common.usr.h b/source/tools/detect/net_diag/rtrace/ebpf/common.usr.h new file mode 100644 index 0000000000000000000000000000000000000000..58e1503b8abb9514cd000b14cf7768cfd5487216 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/common.usr.h @@ -0,0 +1,51 @@ +#ifndef _RTRACE_COMMON_USR_H +#define _RTRACE_COMMON_USR_H +#include +#include +#include +#include "common.def.h" + +extern bool gdebug; + +#ifndef zfree +#define zfree(ptr) ( \ + { \ + free(*ptr); \ + *ptr = NULL; \ + }) +#endif + +#ifndef zclose +#define zclose(fd) ( \ + { \ + int ___err = 0; \ + if ((fd) >= 0) \ + ___err = close((fd)); \ + fd = -1; \ + ___err; \ + }) +#endif + +#define DEBUG_LINE printf("debug: %s:%d:1 fun:%s\n", __FILE__, __LINE__, __FUNCTION__); +#define ERROR_LINE printf("error: %s:%d:1 fun:%s\n", __FILE__, __LINE__, __FUNCTION__); + +static char special_funcs[][50] = { + "tcp_sendmsg", + "tcp_cleanup_rbuf", + "kretprobe_common", + "kprobe_lines", + "raw_sendmsg", +}; + +static inline bool 
is_special_func(char *func) +{ + int i; + int len = sizeof(special_funcs) / sizeof(special_funcs[0]); + + for (i = 0; i < len; i++) + if (strcmp(func, special_funcs[i]) == 0) + return true; + return false; +} + +#endif diff --git a/source/tools/detect/net_diag/rtrace/ebpf/rtrace.bpf.c b/source/tools/detect/net_diag/rtrace/ebpf/rtrace.bpf.c new file mode 100644 index 0000000000000000000000000000000000000000..764cf667c4b1b5fe0ce514760cc970282e4e7fe2 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/rtrace.bpf.c @@ -0,0 +1,721 @@ +#define BPF_NO_GLOBAL_DATA +#include "vmlinux.h" +#include +#include +#include +#include +#include "common.bpf.h" + +#ifndef NULL +#define NULL ((void *)0) +#endif + +// from linux/icmp.h +#define ICMP_ECHOREPLY 0 /* Echo Reply */ +#define ICMP_DEST_UNREACH 3 /* Destination Unreachable */ +#define ICMP_SOURCE_QUENCH 4 /* Source Quench */ +#define ICMP_REDIRECT 5 /* Redirect (change route) */ +#define ICMP_ECHO 8 /* Echo Request */ +#define ICMP_TIME_EXCEEDED 11 /* Time Exceeded */ +#define ICMP_PARAMETERPROB 12 /* Parameter Problem */ +#define ICMP_TIMESTAMP 13 /* Timestamp Request */ +#define ICMP_TIMESTAMPREPLY 14 /* Timestamp Reply */ +#define ICMP_INFO_REQUEST 15 /* Information Request */ +#define ICMP_INFO_REPLY 16 /* Information Reply */ +#define ICMP_ADDRESS 17 /* Address Mask Request */ +#define ICMP_ADDRESSREPLY 18 /* Address Mask Reply */ +#define NR_ICMP_TYPES 18 + +/* Codes for UNREACH. 
*/ +#define ICMP_NET_UNREACH 0 /* Network Unreachable */ +#define ICMP_HOST_UNREACH 1 /* Host Unreachable */ +#define ICMP_PROT_UNREACH 2 /* Protocol Unreachable */ +#define ICMP_PORT_UNREACH 3 /* Port Unreachable */ +#define ICMP_FRAG_NEEDED 4 /* Fragmentation Needed/DF set */ +#define ICMP_SR_FAILED 5 /* Source Route failed */ +#define ICMP_NET_UNKNOWN 6 +#define ICMP_HOST_UNKNOWN 7 +#define ICMP_HOST_ISOLATED 8 +#define ICMP_NET_ANO 9 +#define ICMP_HOST_ANO 10 +#define ICMP_NET_UNR_TOS 11 +#define ICMP_HOST_UNR_TOS 12 +#define ICMP_PKT_FILTERED 13 /* Packet filtered */ +#define ICMP_PREC_VIOLATION 14 /* Precedence violation */ +#define ICMP_PREC_CUTOFF 15 /* Precedence cut off */ +#define NR_ICMP_UNREACH 15 /* instead of hardcoding immediate value */ + +/* Codes for REDIRECT. */ +#define ICMP_REDIR_NET 0 /* Redirect Net */ +#define ICMP_REDIR_HOST 1 /* Redirect Host */ +#define ICMP_REDIR_NETTOS 2 /* Redirect Net for TOS */ +#define ICMP_REDIR_HOSTTOS 3 /* Redirect Host for TOS */ + +/* Codes for TIME_EXCEEDED. 
*/ +#define ICMP_EXC_TTL 0 /* TTL count exceeded */ +#define ICMP_EXC_FRAGTIME 1 /* Fragment Reass time exceeded */ + +struct +{ + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 10); + __type(key, u32); + __type(value, struct filter_map_key); +} filter_map SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, 10240); + __type(key, struct addr_pair); + __type(value, struct sock *); +} flow_map SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, 512); + __type(key, struct tid_map_key); + __type(value, ENUM_TO_STRUCT(BASIC_INFO)); +} tid_map SEC(".maps"); + +/** + * @brief save data into buffer + * + * @param cd + * @param ptr + * @param size + * @return __always_inline + */ +static __always_inline void buffer_input(struct cache_data *cd, void *ptr, uint32_t size) +{ + if (size == 0) + return; + + if (cd->buffer->offset < MAX_BUFFER_SIZE - size) + { + bpf_probe_read(&(cd->buffer->buffer[cd->buffer->offset]), size, ptr); + cd->buffer->offset += size; + } +} + +/** + * @brief output buffer to userspace + * + * @param cd + * @return __always_inline + */ +static __always_inline void buffer_output(struct cache_data *cd) +{ + int size = cd->buffer->offset & (MAX_BUFFER_SIZE - 1); + bpf_perf_event_output(cd->ctx, &perf, BPF_F_CURRENT_CPU, cd->buffer->buffer, size); + cd->buffer->offset = 0; +} + +/** + * @brief Compares two addresses for equality + * + * @param skb_ap + * @param sk_ap + * @return int + */ +static int addr_pair_cmp(struct addr_pair *skb_ap, struct addr_pair *sk_ap) +{ + if (sk_ap->dport == skb_ap->dport && sk_ap->sport == skb_ap->sport) + return 0; + + return -1; +} + +/** + * @brief Set the seq object + * + * @param cd + * @param seq + * @param end_seq + * @param rseq + * @param rend_seq + * @return __always_inline + */ +static __always_inline void set_seq(struct cache_data *cd, uint32_t *seq, uint32_t *end_seq, uint32_t *rseq, uint32_t *rend_seq) +{ + char *data; + uint32_t len, 
tmp_seq, tmp_end_seq, tmp_rseq, tmp_rend_seq; + struct tcp_skb_cb *tsc; + uint32_t protocol = cd->sk_protocol & 0xff; + if (protocol == IPPROTO_ICMP) + { + struct icmphdr *ih = ((struct icmphdr *)(&cd->th)); + uint16_t sequence; + uint8_t type = ih->type; + sequence = ih->un.echo.sequence; + *seq = sequence; + *end_seq = sequence + 1; + *rseq = sequence; + *rend_seq = sequence + 1; + return; + } + + if (cd->transport_header != (uint16_t)~0) + { + struct sk_buff *skb = cd->skb; + BPF_CORE_READ_INTO(&data, skb, data); + BPF_CORE_READ_INTO(&len, skb, len); + + if (cd->send) + { + *seq = bpf_ntohl(cd->th.seq); + *end_seq = *seq + len - cd->transport_header + (data - cd->head) - cd->th.doff * 4; + *rend_seq = bpf_ntohl(cd->th.ack_seq); + } + else + { + *rseq = bpf_ntohl(cd->th.seq); + *rend_seq = *rseq + len - cd->transport_header + (data - cd->head) - cd->th.doff * 4; + struct tcp_sock *ts = (struct tcp_sock *)cd->sk; + BPF_CORE_READ_INTO(seq, ts, snd_una); + *end_seq = bpf_ntohl(cd->th.ack_seq); + } + } + else + { + tsc = (struct tcp_skb_cb *)((unsigned long)cd->skb + offsetof(struct sk_buff, cb[0])); +#define TCPHDR_ACK 0x10 + if (cd->send) + { + uint8_t tcp_flags; + BPF_CORE_READ_INTO(&tcp_flags, tsc, tcp_flags); + BPF_CORE_READ_INTO(seq, tsc, seq); + BPF_CORE_READ_INTO(end_seq, tsc, end_seq); + if (tcp_flags & TCPHDR_ACK) + { + BPF_CORE_READ_INTO(rend_seq, tsc, ack_seq); + } + } + else + { + uint8_t tcp_flags; + BPF_CORE_READ_INTO(&tcp_flags, tsc, tcp_flags); + BPF_CORE_READ_INTO(rseq, tsc, seq); + BPF_CORE_READ_INTO(rend_seq, tsc, end_seq); + if (tcp_flags & TCPHDR_ACK) + { + struct tcp_sock *ts = (struct tcp_sock *)cd->sk; + BPF_CORE_READ_INTO(seq, ts, snd_una); + BPF_CORE_READ_INTO(end_seq, tsc, ack_seq); + } + } + } +} + +/** + * @brief Set the seq by tsc object. Some functions may not have tcp headers, + * such as __tcp_transmit_skb, so seq needs to be obtained from tcp_skb_cb. 
+ * + * @param skb + * @param seq + * @param end_seq + */ +static void set_seq_by_tsc(struct sk_buff *skb, uint32_t *seq, uint32_t *end_seq) +{ + struct tcp_skb_cb *tsc; + tsc = (struct tcp_skb_cb *)((unsigned long)skb + offsetof(struct sk_buff, cb[0])); + BPF_CORE_READ_INTO(seq, tsc, seq); + BPF_CORE_READ_INTO(end_seq, tsc, end_seq); +} + +/** + * @brief Set the addr pair by hdr object + * + * @param cd cache_data structure pointer + * @param ap addr_pair structure pointer + */ +static void set_addr_pair_by_hdr(struct cache_data *cd, struct addr_pair *ap) +{ + ap->saddr = cd->ih.saddr; + ap->daddr = cd->ih.daddr; + + switch (cd->sk_protocol) + { + case IPPROTO_ICMP: + ap->sport = 0; + ap->dport = 0; + break; + case IPPROTO_TCP: + ap->sport = bpf_ntohs(cd->th.source); + ap->dport = bpf_ntohs(cd->th.dest); + break; + default: + break; + } +} + +/** + * @brief Set the addr pair by sock object + * + * @param sk sock object pointer + * @param ap addr_pair object pointer + */ +static __always_inline void set_addr_pair_by_sock(struct sock *sk, struct addr_pair *ap) +{ + BPF_CORE_READ_INTO(&ap->daddr, sk, __sk_common.skc_daddr); + BPF_CORE_READ_INTO(&ap->dport, sk, __sk_common.skc_dport); + ap->dport = bpf_ntohs(ap->dport); + BPF_CORE_READ_INTO(&ap->saddr, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&ap->sport, sk, __sk_common.skc_num); +} + +/** + * @brief Bind sock and addr_pair, and update the sk pointer + * + * @param skp sock's secondary pointer + * @param ap + * @return int + */ +static int set_sock(struct sock **skp, struct addr_pair *ap) +{ + struct sock **skp_tmp; + skp_tmp = bpf_map_lookup_elem(&flow_map, ap); + + // not found. + if (!skp_tmp) + { + if (*skp) + { + bpf_map_update_elem(&flow_map, ap, skp, BPF_ANY); + return 0; + } + return -1; + } + + if (!*skp) + { + if (*skp_tmp == (struct sock *)FLOW_MAP_DEFAULT_VAL) + return -1; + // assign sock pointer. 
+ *skp = *skp_tmp; + } + else if (*skp != *skp_tmp) + bpf_map_update_elem(&flow_map, ap, skp, BPF_ANY); + return 0; +} + +/** + * @brief Set the cache data object + * + * @param cd cache_data structure pointer + * @param skb sk_buff structure pointer + * @return void + */ +static __always_inline void set_cache_data(struct cache_data *cd, struct sk_buff *skb) +{ + char *head, *l3_header_addr, *l4_header_addr = NULL; + u16 mac_header, network_header, transport_header, size; + uint32_t protocol; + + BPF_CORE_READ_INTO(&transport_header, skb, transport_header); + BPF_CORE_READ_INTO(&head, skb, head); + cd->transport_header = transport_header; + + BPF_CORE_READ_INTO(&network_header, skb, network_header); + cd->network_header = network_header; + if (network_header == 0) + { + BPF_CORE_READ_INTO(&mac_header, skb, mac_header); + network_header = mac_header + MAC_HEADER_SIZE; + } + l3_header_addr = head + network_header; + bpf_probe_read(&cd->ih, sizeof(struct iphdr), l3_header_addr); + if (transport_header == (u16)~0) + l4_header_addr = l3_header_addr + cd->ih.ihl * 4; + else + l4_header_addr = head + transport_header; + + if (!l4_header_addr) + return; + + protocol = cd->sk_protocol == 0 ? cd->ih.protocol : cd->sk_protocol; + cd->head = head; + switch (protocol) + { + case IPPROTO_ICMP: + size = sizeof(struct icmphdr); + break; + case IPPROTO_TCP: + size = sizeof(struct tcphdr); + break; + default: + size = 0; + break; + } + if (size) + bpf_probe_read(&cd->th, size, l4_header_addr); + cd->sk_protocol = protocol; + // BPF_CORE_READ_INTO(&head, cd, skb); // to generate cache_data btf info. 
+} + +/** + * @brief Filter out unwanted packets + * + * @param protocol + * @param pid + * @param ap + * @return __always_inline + */ +static __always_inline int builtin_filter(uint32_t protocol, int pid, struct addr_pair *ap) +{ + u32 key = FILTER_MAP_DEFAULT_KEY; + struct filter_map_key *fmkp; + struct filter_meta *fm; + struct addr_pair *app; + int i, cnt; + + fmkp = bpf_map_lookup_elem(&filter_map, &key); + if (!fmkp) + return -1; + + // compare major protocol + if ((fmkp->protocol & 0xff) != (protocol & 0xff)) + return -1; + + // compare minor protocol + i = (fmkp->protocol >> 8) & 0xff; + if (i && i != ((protocol >> 8) & 0xff)) + return -1; + +#pragma unroll + for (i = 0; i < FILTER_RULES_MAX_NUM; i++) + { + fm = &fmkp->fm[i]; + app = &fm->ap; + + if (ZERO_OR_EQUAL(fm->pid, pid) && + ZERO_OR_EQUAL(app->daddr, ap->daddr) && + ZERO_OR_EQUAL(app->dport, ap->dport) && + ZERO_OR_EQUAL(app->saddr, ap->saddr) && + ZERO_OR_EQUAL(app->sport, ap->sport)) + break; + } + + if (i && i >= fmkp->cnt) + return -1; + return 0; +} + +/** + * @brief Main processing function entry + * + * @param ctx + * @param sk + * @param skb + * @return __always_inline + */ +static __always_inline int do_trace_sk_skb(void *ctx, struct sock *sk, struct sk_buff *skb) +{ + INSTERT_PLACEHOLDER(rtrace_mask_t, 1); + struct addr_pair skb_ap = {0}; + struct addr_pair sk_ap = {0}; + struct cache_data cd = {0}; + uint64_t pid_tgid = bpf_get_current_pid_tgid(); + uint32_t pid = pid_tgid >> 32; + uint32_t tid = pid_tgid; + uint32_t default_buffer_map_key = 0; + + if (!sk) + BPF_CORE_READ_INTO(&sk, skb, sk); + + cd.sk_protocol = 0; + if (sk) + cd.sk_protocol = BPF_CORE_READ_BITFIELD_PROBED(sk, sk_protocol); + + set_cache_data(&cd, skb); + switch (cd.sk_protocol) + { + case IPPROTO_TCP: + set_addr_pair_by_hdr(&cd, &skb_ap); + if (sk) + { + // 1. May be the sending path + // 2. 
May be the upper layer of the protocol stack + set_addr_pair_by_sock(sk, &sk_ap); + set_sock(&sk, &sk_ap); + // todo: Consider the impact of nat + if (addr_pair_cmp(&skb_ap, &sk_ap) == 0) + cd.send = 1; + } + else + { + // may be the receive path + sk_ap.daddr = skb_ap.saddr; + sk_ap.dport = skb_ap.sport; + sk_ap.saddr = skb_ap.daddr; + sk_ap.sport = skb_ap.dport; + set_sock(&sk, &sk_ap); + } + if (cd.th.syn) + cd.sk_protocol |= (1 << 8); + break; + case IPPROTO_ICMP: + sk_ap.sport = ((struct icmphdr *)&cd.th)->un.echo.id; + sk_ap.dport = sk_ap.sport; + set_sock(&sk, &sk_ap); + break; + default: + return 0; + } + + if (!sk) + return -1; + if (builtin_filter(cd.sk_protocol, pid, &sk_ap) < 0) + return -1; + + // Here, we have captured the message we want. + cd.skb = skb; + cd.sk = sk; + cd.ctx = ctx; + cd.buffer = bpf_map_lookup_elem(&buffer_map, &default_buffer_map_key); + + if (!cd.buffer) + return -1; + + if (TEST_NBITS_SET(LOOKUP_PLACEHOLDER(1), BASIC_INFO)) + { + DECLARE_AND_INIT_STRUCT(BASIC_INFO, bi); + set_seq(&cd, &bi.seq, &bi.end_seq, &bi.rseq, &bi.rend_seq); + bi.mask = LOOKUP_PLACEHOLDER(1); + bi.mask &= (~(1ull << KRETPROBE)); + bi.mask &= (~(1ull << LINEPROBE)); + bi.ip = PT_REGS_IP((struct pt_regs *)cd.ctx); + bi.ts = bpf_ktime_get_ns(); + bi.ap = sk_ap; + bi.pi.pid = pid; + bpf_get_current_comm(bi.pi.comm, TASK_COMM_LEN); + buffer_input(&cd, &bi, sizeof(bi)); + + if (TEST_NBITS_SET(LOOKUP_PLACEHOLDER(1), KRETPROBE) || TEST_NBITS_SET(LOOKUP_PLACEHOLDER(1), LINEPROBE)) + { + struct tid_map_key tmk = {0}; + tmk.tid = tid; + // todo: lineprobe and multi-level Kretprobe nesting + // tmk.bp = ((struct pt_regs *)cd.ctx)->bp; + tmk.bp = 0; + bi.mask = LOOKUP_PLACEHOLDER(1); + bpf_map_update_elem(&tid_map, &tmk, &bi, BPF_ANY); + } + } + + if (TEST_NBITS_SET(LOOKUP_PLACEHOLDER(1), CGROUP)) + { + DECLARE_AND_INIT_STRUCT(CGROUP, cg); + read_ns_inum_by_sk(sk, &cg.inum); + read_cgroup_id(&cg.cgroupid); + buffer_input(&cd, &cg, sizeof(cg)); + } + + if 
(TEST_NBITS_SET(LOOKUP_PLACEHOLDER(1), STACK)) + { + int size = sizeof(ENUM_TO_STRUCT(STACK)); + if (cd.buffer->offset < (MAX_BUFFER_SIZE - size)) + { + bpf_get_stack(ctx, &(cd.buffer->buffer[cd.buffer->offset]), size, BPF_ANY); + cd.buffer->offset += size; + } + } + + void *ctxp; + asm volatile("%0 = %1" + : "=r"(ctxp) + : "r"(&cd)); + buffer_output(&cd); + return 0; +} + +SEC("kretprobe/common") +int kretprobe_common(struct pt_regs *ctx) +{ + struct tid_map_key tmk = {0}; + uint64_t mask; + tmk.tid = (uint32_t)bpf_get_current_pid_tgid(); + tmk.bp = 0; + ENUM_TO_STRUCT(BASIC_INFO) *bi = bpf_map_lookup_elem(&tid_map, &tmk); + if (!bi) + return 0; + mask = bi->mask; + if (TEST_NBITS_SET(mask, KRETPROBE)) + { + bi->mask = 1ull << KRETPROBE; + // bi->ip = ctx->ip; // cannot cover ip. rip now pointer to kretprobe_trampoline. + bi->ts = bpf_ktime_get_ns(); + bi->ret = PT_REGS_RC(ctx); + bpf_perf_event_output(ctx, &perf, BPF_F_CURRENT_CPU, bi, sizeof(ENUM_TO_STRUCT(BASIC_INFO))); + } + bpf_map_delete_elem(&tid_map, &tmk); + return 0; +} + +SEC("kprobe/lines") +int kprobe_lines(struct pt_regs *ctx) +{ + struct tid_map_key tmk = {0}; + uint64_t bp; + // todo: find upper function rbp + // bpf_probe_read_kernel(&bp, sizeof(bp), (void *)(ctx->bp)) + tmk.tid = (uint32_t)bpf_get_current_pid_tgid(); + tmk.bp = 0; + ENUM_TO_STRUCT(BASIC_INFO) *bi = bpf_map_lookup_elem(&tid_map, &tmk); + if (!bi) + return 0; + + bi->mask = 1ull << LINEPROBE; + bi->ip = ctx->ip; + bi->ts = bpf_ktime_get_ns(); + bpf_perf_event_output(ctx, &perf, BPF_F_CURRENT_CPU, bi, sizeof(ENUM_TO_STRUCT(BASIC_INFO))); + return 0; +} + +SEC("kprobe/tcp_cleanup_rbuf") +int BPF_KPROBE(tcp_cleanup_rbuf, struct sock *sk, int copied) +{ + ENUM_TO_STRUCT(BASIC_INFO) + bi = {0}; + INSTERT_PLACEHOLDER(rtrace_mask_t, 1); + struct tcp_sock *ts = (struct tcp_sock *)sk; + uint32_t copied_seq; + uint64_t pid_tgid = bpf_get_current_pid_tgid(); + uint32_t pid = pid_tgid >> 32; + uint32_t tid = pid_tgid; + + 
set_addr_pair_by_sock(sk, &bi.ap); + set_sock(&sk, &bi.ap); + + if (builtin_filter(IPPROTO_TCP, pid, &bi.ap) < 0) + return -1; + + bi.mask = 1 << BASIC_INFO; + bi.ip = ctx->ip; + bi.ts = bpf_ktime_get_ns(); + bi.seq = 0; + bi.end_seq = 0; + BPF_CORE_READ_INTO(&copied_seq, ts, copied_seq); + bi.rseq = copied_seq - copied; + bi.rend_seq = copied_seq; + bi.pi.pid = pid; + bpf_get_current_comm(bi.pi.comm, TASK_COMM_LEN); + if (TEST_NBITS_SET(LOOKUP_PLACEHOLDER(1), KRETPROBE)) + { + struct tid_map_key tmk = {0}; + tmk.tid = tid; + tmk.bp = ctx->bp; + bpf_map_update_elem(&tid_map, &tmk, &bi, BPF_ANY); + } + bpf_perf_event_output(ctx, &perf, BPF_F_CURRENT_CPU, &bi, sizeof(ENUM_TO_STRUCT(BASIC_INFO))); + return 0; +} + +SEC("kprobe/tcp_sendmsg") +int BPF_KPROBE(tcp_sendmsg, struct sock *sk, struct msghdr *msg, size_t size) +{ + ENUM_TO_STRUCT(BASIC_INFO) + bi = {0}; + INSTERT_PLACEHOLDER(rtrace_mask_t, 1); + struct tcp_sock *ts = (struct tcp_sock *)sk; + uint64_t pid_tgid = bpf_get_current_pid_tgid(); + uint32_t pid = pid_tgid >> 32; + uint32_t tid = pid_tgid; + + set_addr_pair_by_sock(sk, &bi.ap); + set_sock(&sk, &bi.ap); + if (builtin_filter(IPPROTO_TCP, pid, &bi.ap) < 0) + return -1; + + bi.mask = 1 << BASIC_INFO; + bi.ip = ctx->ip; + bi.ts = bpf_ktime_get_ns(); + BPF_CORE_READ_INTO(&bi.seq, ts, write_seq); + bi.end_seq = bi.seq + size; + bi.pi.pid = pid; + bpf_get_current_comm(bi.pi.comm, TASK_COMM_LEN); + if (TEST_NBITS_SET(LOOKUP_PLACEHOLDER(1), KRETPROBE)) + { + struct tid_map_key tmk = {0}; + tmk.tid = tid; + tmk.bp = ctx->bp; + bpf_map_update_elem(&tid_map, &tmk, &bi, BPF_ANY); + } + bpf_perf_event_output(ctx, &perf, BPF_F_CURRENT_CPU, &bi, sizeof(ENUM_TO_STRUCT(BASIC_INFO))); + return 0; +} + +SEC("kprobe/raw_sendmsg") +int BPF_KPROBE(raw_sendmsg, struct sock *sk, struct msghdr *msg, size_t len) +{ + ENUM_TO_STRUCT(BASIC_INFO) + bi = {0}; + INSTERT_PLACEHOLDER(rtrace_mask_t, 1); + uint64_t pid_tgid = bpf_get_current_pid_tgid(); + uint32_t pid = pid_tgid >> 32; + 
uint32_t tid = pid_tgid; + struct icmphdr ih; + uint32_t protocol; + char *ptr; + + protocol = BPF_CORE_READ_BITFIELD_PROBED(sk, sk_protocol); + if (protocol != IPPROTO_ICMP) + return 0; + + BPF_CORE_READ_INTO(&ptr, msg, msg_iter.iov, iov_base); + bpf_probe_read(&ih, sizeof(ih), ptr); + bi.ap.sport = ih.un.echo.id; + bi.ap.dport = bi.ap.sport; + set_sock(&sk, &bi.ap); + if (builtin_filter(IPPROTO_ICMP, pid, &bi.ap) < 0) + return -1; + + bi.mask = 1 << BASIC_INFO; + bi.ip = ctx->ip; + bi.ts = bpf_ktime_get_ns(); + bi.seq = ih.un.echo.sequence; + bi.end_seq = bi.seq + 1; + bi.rseq = bi.seq; + bi.rend_seq = bi.end_seq; + bpf_get_current_comm(bi.pi.comm, TASK_COMM_LEN); + if (TEST_NBITS_SET(LOOKUP_PLACEHOLDER(1), KRETPROBE)) + { + struct tid_map_key tmk = {0}; + tmk.tid = tid; + tmk.bp = ctx->bp; + bpf_map_update_elem(&tid_map, &tmk, &bi, BPF_ANY); + } + bpf_perf_event_output(ctx, &perf, BPF_F_CURRENT_CPU, &bi, sizeof(ENUM_TO_STRUCT(BASIC_INFO))); + return 0; +} + +#define SK0_SKB_ARG_FN(pos) \ + SEC("kprobe/sk0_skb" #pos) \ + int kprobe_sk0_skb##pos(struct pt_regs *pt) \ + { \ + struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM##pos(pt); \ + do_trace_sk_skb(pt, NULL, skb); \ + return 0; \ + } + +#define SK_SKB_ARG_FN(skpos, skbpos) \ + SEC("kprobe/sk" #skpos "_skb" #skbpos) \ + int kprobe_sk##skpos##_skb##skbpos(struct pt_regs *pt) \ + { \ + struct sock *sk = (struct sock *)PT_REGS_PARM##skpos(pt); \ + struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM##skbpos(pt); \ + do_trace_sk_skb(pt, sk, skb); \ + return 0; \ + } + +SK0_SKB_ARG_FN(1) +SK0_SKB_ARG_FN(2) +SK0_SKB_ARG_FN(3) +SK0_SKB_ARG_FN(4) +SK0_SKB_ARG_FN(5) + +SK_SKB_ARG_FN(1, 2) +SK_SKB_ARG_FN(2, 3) + +char LICENSE[] SEC("license") = "GPL"; \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/ebpf/rtrace.c b/source/tools/detect/net_diag/rtrace/ebpf/rtrace.c new file mode 100644 index 0000000000000000000000000000000000000000..8b0afa6b8d1e859f4c3538f855aca8ad0a81d949 --- /dev/null +++ 
b/source/tools/detect/net_diag/rtrace/ebpf/rtrace.c @@ -0,0 +1,464 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.usr.h" +#include "utils/btf.h" +#include + +#include "rtrace.h" +#include "rtrace.skel.h" + +#define RTRACE_DYNAMIC_CTX_REG BPF_REG_6 +#define JMP_ERR_CODE 4096 + +#define BPF_ALU64_REG(OP, DST, SRC) \ + ((struct bpf_insn){ \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0}) + +struct rtrace +{ + struct rtrace_bpf *obj; + char *pin_path; + char *btf_custom_path; + struct btf *btf; +}; + +bool gdebug = false; + +static int libbpf_print_fn(enum libbpf_print_level level, + const char *format, va_list args) +{ + if (gdebug) + return vfprintf(stderr, format, args); + return 0; +} + +int bump_memlock_rlimit(void) +{ + struct rlimit rlim_new = { + .rlim_cur = RLIM_INFINITY, + .rlim_max = RLIM_INFINITY, + }; + + return setrlimit(RLIMIT_MEMLOCK, &rlim_new); +} + + +/** + * @brief enable debug or not + * + * @param debug debug output or not + */ +void rtrace_set_debug(bool debug) +{ + gdebug = debug; +} + +/** + * @brief Get the fd of the perf map + * + * @param r rtrace context + * @return int map fd of type BPF_MAP_TYPE_PERF_EVENT_ARRAY + */ +int rtrace_perf_map_fd(struct rtrace *r) +{ + return bpf_map__fd(r->obj->maps.perf); +} +/** + * @brief Get the fd of the filter map + * + * @param r rtrace context + * @return int fd of filter map + */ +int rtrace_filter_map_fd(struct rtrace *r) +{ + return bpf_map__fd(r->obj->maps.filter_map); +} + +static int rtrace_init(struct rtrace *r, char *btf_custom_path, char *pin_path) +{ + int err; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts); + + libbpf_set_print(libbpf_print_fn); + bump_memlock_rlimit(); + + r->btf_custom_path = btf_custom_path; + r->pin_path = pin_path; + open_opts.btf_custom_path = btf_custom_path; + r->obj = rtrace_bpf__open_opts(&open_opts); + if (!r->obj) + { + pr_err("failed to 
open BPF object\n"); + err = -EINVAL; + goto err_out; + } + + err = rtrace_bpf__load(r->obj); + if (err) + { + pr_err("failed to load bpf, err: %s\n", strerror(-err)); + goto err_out; + } + + if (pin_path) + { + err = bpf_object__pin_maps(r->obj->obj, pin_path); + if (err) + { + pr_err("failed to pin maps\n"); + goto err_out; + } + } + + r->btf = btf_load(btf_custom_path); + if (!r->btf) + { + pr_err("Failed to load vmlinux BTF: %d, err msg: %s\n", -errno, strerror(errno)); + err = -errno; + goto err_out; + } + + return 0; + +err_out: + + return err; +} + +/** + * @brief + * + * @param btf_custom_path + * @param pin_path + * @return struct rtrace* + */ +struct rtrace *rtrace_alloc_and_init(char *btf_custom_path, char *pin_path) +{ + struct rtrace *r; + int err; + + r = malloc(sizeof(struct rtrace)); + if (!r) + { + errno = ENOMEM; + return NULL; + } + + err = rtrace_init(r, btf_custom_path, pin_path); + if (err) + { + errno = -err; + goto err_out; + } + return r; + +err_out: + // rtrace_free(r); + return NULL; +} + +/** + * @brief Find the corresponding ebpf program according to the function name and the sk, + * skb parameter positions + * + * @param r rtrace context + * @param func function name, eg. __ip_queue_xmit + * @param sk optional, the parameter position of the sk parameter in the function prototype + * @param skb optional, the parameter position of the skb parameter in the function prototype + * @return struct bpf_program* ebpf program + */ +struct bpf_program *rtrace_trace_program(struct rtrace *r, char *func, int sk, int skb) +{ + struct bpf_program *prog; + int func_proto_id; + if (is_special_func(func)) + { + prog = bpf_object__find_program_by_name(r->obj->obj, func); + } + else + { + if (sk == 0 && skb == 0) + { + func_proto_id = btf_find_func_proto_id(r->btf, func); + sk = btf_func_proto_find_param_pos(r->btf, func_proto_id, "sock", NULL); + sk = sk < 0 ? 
0 : sk; + skb = btf_func_proto_find_param_pos(r->btf, func_proto_id, "sk_buff", NULL); + if (skb < 0) + { + pr_err("func-%s prog is null, sk = %d, skb = %d.\n", func, sk, skb); + return NULL; + } + } + prog = object_find_program(r->obj->obj, sk, skb); + } + // if (gdebug) + // insns_dump(bpf_program__insns(prog), bpf_program__insn_cnt(prog)); + return prog; +} + +/** + * @brief Load the incoming ebpf instruction, after verification by the kernel, + * return the corresponding file descriptor + * + * @param r rtrace context + * @param prog bpf program to laod + * @param insns the ebpf instruction that really needs to be loaded + * @param insns_cnt instruction count + * @return int fd + */ +int rtrace_trace_load_prog(struct rtrace *r, struct bpf_program *prog, + struct bpf_insn *insns, size_t insns_cnt) +{ + struct bpf_load_program_attr attr; + static const int log_buf_size = 1024 * 1024; + char log_buf[log_buf_size]; + int fd; + + if (gdebug) + insns_dump(insns, insns_cnt); + + memset(&attr, 0, sizeof(attr)); + attr.prog_type = bpf_program__get_type(prog); + attr.expected_attach_type = bpf_program__get_expected_attach_type(prog); + attr.name = bpf_program__name((const struct bpf_program *)prog); + attr.insns = insns; + attr.insns_cnt = insns_cnt; + attr.license = "Dual BSD/GPL"; + attr.kern_version = bpf_object__kversion(r->obj->obj); + attr.prog_ifindex = 0; + + if (gdebug) + fd = bpf_load_program_xattr(&attr, log_buf, log_buf_size); + else + fd = bpf_load_program_xattr(&attr, NULL, 0); + if (fd < 0) + { + printf("%s\n", log_buf); + return fd; + } + bpf_program__set_fd(prog, fd); + return fd; +} + +struct dynamic_parse +{ + struct + { + int offset; // in bits + int size; + int elem_size; + bool is_ptr; + } attr[10]; + int cnt; + + int offsets[10]; + int offset_cnt; + int size; + int arg_pos; +}; + +#define OFFSET_REGS_PARM1 offsetof(struct pt_regs, rdi) +#define OFFSET_REGS_PARM2 offsetof(struct pt_regs, rsi) +#define OFFSET_REGS_PARM3 offsetof(struct pt_regs, rdx) 
+#define OFFSET_REGS_PARM4 offsetof(struct pt_regs, rcx) +#define OFFSET_REGS_PARM5 offsetof(struct pt_regs, r8) + +static int dynamic_ptregs_param_offset(int param_pos) +{ + if (param_pos >= 5 || param_pos <= 0) + return -EINVAL; + + switch (param_pos) + { + case 1: + return OFFSET_REGS_PARM1; + case 2: + return OFFSET_REGS_PARM2; + case 3: + return OFFSET_REGS_PARM3; + case 4: + return OFFSET_REGS_PARM4; + case 5: + return OFFSET_REGS_PARM5; + default: + return -EINVAL; + } + return -EINVAL; +} + +/** + * @brief Calculate the corresponding offset according to the accessed structure member + * + * @param r rtrace context + * @param df array of members accessed by the structure + * @param df_cnt array length + * @param func_proto_id btf id + * @param dos offsets for struct members + * @return int 0 is ok + */ +int rtrace_dynamic_gen_offset(struct rtrace *r, struct dynamic_fields *df, + int df_cnt, int func_proto_id, struct dynamic_offsets *dos) +{ + struct dynamic_parse dp = {0}; + const struct btf_member *mem; + int i, err, offset, pre_typeid, root_typeid, cnt, off_sum; + + if (!r || !r->btf || df_cnt <= 0) + return -EINVAL; + + root_typeid = btf_func_proto_find_param(r->btf, func_proto_id, NULL, df[0].ident); + if (root_typeid < 0) + { + pr_dbg("failed to find param: %s in function", df[0].ident); + err = root_typeid; + goto err_out; + } + + err = btf_func_proto_find_param_pos(r->btf, func_proto_id, NULL, df[0].ident); + if (err <= 0) + goto err_out; + + cnt = 0; + + dp.attr[cnt].offset = err; + if (df[0].cast_type > 0) + { + root_typeid = btf__find_by_name_kind(r->btf, df[0].cast_name, df[0].cast_type); + if (root_typeid < 0) + { + err = root_typeid; + goto err_out; + } + if (df[0].pointer == 1) + dp.attr[cnt].is_ptr = true; + else + dp.attr[cnt].is_ptr = false; + } + else + dp.attr[cnt].is_ptr = btf_typeid_has_ptr(r->btf, root_typeid); + + cnt++; + pre_typeid = root_typeid; + for (i = 1; i < df_cnt; i++) + { + offset = 0; + mem = btf_find_member(r->btf, 
pre_typeid, df[i].ident, &offset); + if (mem == NULL) + { + err = -errno; + goto err_out; + } + dp.attr[cnt].offset = offset; + if (df[i].cast_type > 0) + { + pre_typeid = btf__find_by_name_kind(r->btf, df[i].cast_name, df[i].cast_type); + if (pre_typeid < 0) + { + err = pre_typeid; + goto err_out; + } + if (df[i].pointer == 1) + dp.attr[cnt].is_ptr = true; + else + dp.attr[cnt].is_ptr = false; + } + else + { + dp.attr[cnt].is_ptr = btf_typeid_has_ptr(r->btf, mem->type); + pre_typeid = mem->type; + } + + cnt++; + } + + dp.cnt = cnt; + + off_sum = 0; + cnt = 0; + for (i = 1; i < dp.cnt - 1; i++) + { + off_sum += dp.attr[i].offset; + if (dp.attr[i].is_ptr) + { + dp.offsets[cnt++] = off_sum / 8; + off_sum = 0; + } + } + dos->offs[cnt++] = (dp.attr[dp.cnt - 1].offset + off_sum) / 8; + dos->cnt = cnt; + dos->size = btf__resolve_size(r->btf, pre_typeid); + dos->arg = dp.attr[0].offset; + + pr_dbg("offset array:\n"); + for (i = 0; i < dos->cnt; i++) + pr_dbg("%d %s", dos->offs[i], i == dos->cnt - 1 ? 
"\n" : ""); + + return 0; + +err_out: + return err; +} + +/** + * @brief + * + * @param r rtrace context + * @param dos offsets for struct members + * @param insns pointer to save instructions + * @param cd_off struct cache_data offset in stack + * @return int instruction count + */ +int rtrace_dynamic_gen_insns(struct rtrace *r, struct dynamic_offsets *dos, struct bpf_insn *insns, int cd_off) +{ + int i, insns_cnt, ctx_off, regs_off, buff_off; + + insns_cnt = 0; + ctx_off = cd_off + offsetof(struct cache_data, ctx); + buff_off = cd_off + offsetof(struct cache_data, buffer); + insns[insns_cnt++] = BPF_LDX_MEM(BPF_DW, RTRACE_DYNAMIC_CTX_REG, BPF_REG_10, ctx_off); + + regs_off = dynamic_ptregs_param_offset(dos->arg); + insns[insns_cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, RTRACE_DYNAMIC_CTX_REG, regs_off); + for (i = 0; i < dos->cnt - 1; i++) + { + insns[insns_cnt++] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, dos->offs[i]); + insns[insns_cnt++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10); + insns[insns_cnt++] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8); + insns[insns_cnt++] = BPF_MOV64_IMM(BPF_REG_2, 8); + insns[insns_cnt++] = BPF_EMIT_CALL(BPF_FUNC_probe_read); + insns[insns_cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8); + } + insns[insns_cnt++] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, dos->offs[i]); + insns[insns_cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, buff_off); + insns[insns_cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 0); + insns[insns_cnt++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_2, MAX_BUFFER_SIZE - dos->size, JMP_ERR_CODE); + insns[insns_cnt++] = BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2); + insns[insns_cnt++] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8); + insns[insns_cnt++] = BPF_MOV64_IMM(BPF_REG_2, dos->size); + insns[insns_cnt++] = BPF_EMIT_CALL(BPF_FUNC_probe_read); + insns[insns_cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, buff_off); + insns[insns_cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 0); + insns[insns_cnt++] = BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_2, dos->size); + insns[insns_cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0); + + pr_dbg("generate new insns, insns cnt: %d\n", insns_cnt); + if (gdebug) + insns_dump(insns, insns_cnt); + return insns_cnt; +} + +struct btf *rtrace_dynamic_btf(struct rtrace *r) +{ + return r->btf; +} \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/ebpf/rtrace.h b/source/tools/detect/net_diag/rtrace/ebpf/rtrace.h new file mode 100644 index 0000000000000000000000000000000000000000..a7286ed71e34422f254da57398842a9792d47522 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/rtrace.h @@ -0,0 +1,48 @@ +#ifndef __RTRACE_RTRACE_H +#define __RTRACE_RTRACE_H + +#define MAX_PROBE_NUM 1024 + +#include "common.usr.h" +#include "utils/btf.h" +#include "utils/disasm.h" +#include "utils/insn.h" +#include "utils/object.h" + +struct dynamic_offsets +{ + int offs[10]; + int cnt; + int arg; + int size; +}; + +struct dynamic_fields +{ + char *ident; + char* cast_name; + int cast_type; + int index; + int pointer; +}; + +struct rtrace; + +struct rtrace *rtrace_alloc_and_init(char *pin_path, char *btf_custom_path); +int rtrace_perf_map_fd(struct rtrace *r); +int rtrace_filter_map_fd(struct rtrace *r); +void rtrace_set_debug(bool debug); + + +// dynamic module. +int rtrace_dynamic_gen_offset(struct rtrace *r, struct dynamic_fields *df, + int df_cnt, int func_proto_id, struct dynamic_offsets *dos); +int rtrace_dynamic_gen_insns(struct rtrace *r, struct dynamic_offsets *dos, struct bpf_insn *insns, int cd_off); +struct btf *rtrace_dynamic_btf(struct rtrace *r); + +// trace module. 
+int rtrace_trace_load_prog(struct rtrace *r, struct bpf_program *prog, + struct bpf_insn *insns, size_t insns_cnt); +struct bpf_program *rtrace_trace_program(struct rtrace *r, char *func, int sk, int skb); + +#endif \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/ebpf/utils/btf.c b/source/tools/detect/net_diag/rtrace/ebpf/utils/btf.c new file mode 100644 index 0000000000000000000000000000000000000000..ab8cbc5a8962301563d886c8426f61fa0d81283d --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/utils/btf.c @@ -0,0 +1,220 @@ +#include +#include +#include +#include + +#include "common.usr.h" +#include "utils/btf.h" + +static bool btf_type_is_modifier(const struct btf_type *t) +{ + /* Some of them is not strictly a C modifier + * but they are grouped into the same bucket + * for BTF concern: + * A type (t) that refers to another + * type through t->type AND its size cannot + * be determined without following the t->type. + * + * ptr does not fall into this bucket + * because its size is always sizeof(void *). + */ + switch (BTF_INFO_KIND(t->info)) + { + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + // case BTF_KIND_TYPE_TAG: + return true; + } + + return false; +} + +const struct btf_type *btf_type_skip_modifiers(const struct btf *btf, + uint32_t id, uint32_t *res_id) +{ + const struct btf_type *t = btf__type_by_id(btf, id); + + while (btf_type_is_modifier(t)) + { + id = t->type; + t = btf__type_by_id(btf, t->type); + } + + if (res_id) + *res_id = id; + + return t; +} + +const struct btf_type *btf_type_skip_ptr(const struct btf *btf, uint32_t id) +{ + const struct btf_type *t = btf__type_by_id(btf, id); + while (btf_is_ptr(t)) + t = btf__type_by_id(btf, t->type); + + return t; +} + +/* Similar to btf_type_skip_modifiers() but does not skip typedefs. 
*/ +static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf, + uint32_t id) +{ + const struct btf_type *t = btf__type_by_id(btf, id); + + while (btf_type_is_modifier(t) && + BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) + { + t = btf__type_by_id(btf, t->type); + } + + return t; +} + +bool btf_typeid_has_ptr(const struct btf *btf, int id) +{ + const struct btf_type *t; + t = btf_type_skip_modifiers(btf, id, NULL); + + if (!btf_is_ptr(t)) + return false; + return true; +} + +const struct btf_member *btf_find_member(struct btf *btf, int typeid, + const char *target_member_name, int *offset) +{ + const struct btf_type *t; + const struct btf_member *m, *tmpm; + const char *name; + int i; + t = btf__type_by_id(btf, typeid); + t = btf_type_skip_modifiers(btf, typeid, (uint32_t *)&typeid); + t = btf_type_skip_ptr(btf, typeid); + m = btf_members(t); + for (i = 0; i < btf_vlen(t); i++, m++) + { + name = btf__name_by_offset(btf, m->name_off); + if (!name || !name[0]) + { + // find embedded struct/union + tmpm = btf_find_member(btf, m->type, target_member_name, offset); + if (tmpm) + { + pr_dbg("find member: name-%s, off-%u, size-%llu\n", btf__name_by_offset(btf, tmpm->name_off), tmpm->offset, btf__resolve_size(btf, tmpm->type)); + *offset += m->offset; + return tmpm; + } + } + else if (strcmp(name, target_member_name) == 0) + { + pr_dbg("find member: name-%s, off-%u, size-%llu\n", btf__name_by_offset(btf, m->name_off), m->offset, btf__resolve_size(btf, m->type)); + *offset += m->offset; + return m; + } + } + + pr_dbg("Unable to find %s(member) in %s(struct)\n", target_member_name, btf__name_by_offset(btf, t->name_off)); + return NULL; +} + +struct btf *btf_load(char *btf_custom_path) +{ + struct btf *btf; + int err; + if (btf_custom_path != NULL) + btf = btf__parse(btf_custom_path, NULL); + else + btf = libbpf_find_kernel_btf(); + + err = libbpf_get_error(btf); + if (err) + { + errno = -err; + return NULL; + } + + return btf; +} + +static const char 
*btf_param_type_name(struct btf *btf, const struct btf_param *p) +{ + const struct btf_type *t; + __s32 id = p->type; + t = btf__type_by_id(btf, id); + // todo: 过滤掉名字不是结构体类型的参数 + if (BTF_INFO_KIND(t->info) == BTF_KIND_PTR) + t = btf__type_by_id(btf, t->type); + + if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST) + t = btf__type_by_id(btf, t->type); + return btf__name_by_offset(btf, t->name_off); +} + +int btf_func_proto_find_param(struct btf *btf, int func_proto_id, + const char *type_name, const char *param_name) +{ + const struct btf_type *t; + const struct btf_param *p; + const char *tmp_param_name, *tmp_type_name; + int i; + + t = btf__type_by_id(btf, func_proto_id); + if (t == NULL) + return -EINVAL; + + for (i = 0; i < btf_vlen(t); i++) + { + p = btf_params(t) + i; + tmp_param_name = btf__name_by_offset(btf, p->name_off); + if (param_name && tmp_param_name && strcmp(param_name, tmp_param_name) == 0) + return p->type; + + tmp_type_name = btf_param_type_name(btf, p); + if (type_name && tmp_type_name && strcmp(type_name, tmp_type_name) == 0) + return p->type; + } + return -ENOENT; +} + +int btf_func_proto_find_param_pos(struct btf *btf, int func_proto_id, + const char *type_name, const char *param_name) +{ + const struct btf_type *t; + const struct btf_param *p; + const char *tmp_param_name, *tmp_type_name; + int i; + + t = btf__type_by_id(btf, func_proto_id); + if (t == NULL) + return -EINVAL; + + for (i = 0; i < btf_vlen(t); i++) + { + p = btf_params(t) + i; + tmp_param_name = btf__name_by_offset(btf, p->name_off); + if (param_name && tmp_param_name && strcmp(param_name, tmp_param_name) == 0) + return i + 1; + + tmp_type_name = btf_param_type_name(btf, p); + if (type_name && tmp_type_name && strcmp(type_name, tmp_type_name) == 0) + return i + 1; + } + return -ENOENT; +} + +int btf_find_func_proto_id(struct btf *btf, const char *func_name) +{ + const struct btf_type *t; + int id; + + if (!btf && !func_name) + return -EINVAL; + + id = btf__find_by_name_kind(btf, 
func_name, BTF_KIND_FUNC); + if (id <= 0) + return id; + t = btf__type_by_id(btf, id); + return t->type; +} \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/ebpf/utils/btf.h b/source/tools/detect/net_diag/rtrace/ebpf/utils/btf.h new file mode 100644 index 0000000000000000000000000000000000000000..43ce22bf58ea0105ee62c0d4fd334ded2db66486 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/utils/btf.h @@ -0,0 +1,17 @@ +#ifndef __RTRACE_UTILS_BTF_H +#define __RTRACE_UTILS_BTF_H + +struct btf *btf_load(char *btf_custom_path); + +int btf_func_proto_find_param(struct btf *btf, int func_proto_id, + const char *type_name, const char *param_name); +int btf_func_proto_find_param_pos(struct btf *btf, int func_proto_id, + const char *type_name, const char *param_name); +// Find func proto type id by func name. +int btf_find_func_proto_id(struct btf *btf, const char *func_name); +// Find member in struct/union by member name. +const struct btf_member *btf_find_member(struct btf *btf, int typeid, + const char *target_member_name, int *offset); +bool btf_typeid_has_ptr(const struct btf *btf, int id); + +#endif diff --git a/source/tools/detect/net_diag/rtrace/ebpf/utils/disasm.c b/source/tools/detect/net_diag/rtrace/ebpf/utils/disasm.c new file mode 100644 index 0000000000000000000000000000000000000000..130ff88078f7eaec2871cbe378152258313328f3 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/utils/disasm.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * Copyright (c) 2016 Facebook + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "disasm.h" + +#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x) +static const char * const func_id_str[] = { + __BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN) +}; +#undef __BPF_FUNC_STR_FN + +static const char *__func_get_name(const struct bpf_insn_cbs *cbs, 
+ const struct bpf_insn *insn, + char *buff, size_t len) +{ + // BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID); + + if (!insn->src_reg && + insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID && + func_id_str[insn->imm]) + return func_id_str[insn->imm]; + + if (cbs && cbs->cb_call) { + const char *res; + + res = cbs->cb_call(cbs->private_data, insn); + if (res) + return res; + } + + if (insn->src_reg == BPF_PSEUDO_CALL) + snprintf(buff, len, "%+d", insn->imm); + // else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) + // snprintf(buff, len, "kernel-function"); + + return buff; +} + +static const char *__func_imm_name(const struct bpf_insn_cbs *cbs, + const struct bpf_insn *insn, + uint64_t full_imm, char *buff, size_t len) +{ + if (cbs && cbs->cb_imm) + return cbs->cb_imm(cbs->private_data, insn, full_imm); + + snprintf(buff, len, "0x%llx", (unsigned long long)full_imm); + return buff; +} + +const char *func_id_name(int id) +{ + if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id]) + return func_id_str[id]; + else + return "unknown"; +} + +const char *const bpf_class_string[8] = { + [BPF_LD] = "ld", + [BPF_LDX] = "ldx", + [BPF_ST] = "st", + [BPF_STX] = "stx", + [BPF_ALU] = "alu", + [BPF_JMP] = "jmp", + [BPF_JMP32] = "jmp32", + [BPF_ALU64] = "alu64", +}; + +const char *const bpf_alu_string[16] = { + [BPF_ADD >> 4] = "+=", + [BPF_SUB >> 4] = "-=", + [BPF_MUL >> 4] = "*=", + [BPF_DIV >> 4] = "/=", + [BPF_OR >> 4] = "|=", + [BPF_AND >> 4] = "&=", + [BPF_LSH >> 4] = "<<=", + [BPF_RSH >> 4] = ">>=", + [BPF_NEG >> 4] = "neg", + [BPF_MOD >> 4] = "%=", + [BPF_XOR >> 4] = "^=", + [BPF_MOV >> 4] = "=", + [BPF_ARSH >> 4] = "s>>=", + [BPF_END >> 4] = "endian", +}; + +static const char *const bpf_atomic_alu_string[16] = { + [BPF_ADD >> 4] = "add", + [BPF_AND >> 4] = "and", + [BPF_OR >> 4] = "or", + [BPF_XOR >> 4] = "xor", +}; + +static const char *const bpf_ldst_string[] = { + [BPF_W >> 3] = "u32", + [BPF_H >> 3] = "u16", + [BPF_B >> 3] = "u8", + [BPF_DW >> 3] = "u64", 
+}; + +static const char *const bpf_jmp_string[16] = { + [BPF_JA >> 4] = "jmp", + [BPF_JEQ >> 4] = "==", + [BPF_JGT >> 4] = ">", + [BPF_JLT >> 4] = "<", + [BPF_JGE >> 4] = ">=", + [BPF_JLE >> 4] = "<=", + [BPF_JSET >> 4] = "&", + [BPF_JNE >> 4] = "!=", + [BPF_JSGT >> 4] = "s>", + [BPF_JSLT >> 4] = "s<", + [BPF_JSGE >> 4] = "s>=", + [BPF_JSLE >> 4] = "s<=", + [BPF_CALL >> 4] = "call", + [BPF_EXIT >> 4] = "exit", +}; + +static void print_bpf_end_insn(bpf_insn_print_t verbose, + void *private_data, + const struct bpf_insn *insn) +{ + verbose(private_data, "(%02x) r%d = %s%d r%d\n", + insn->code, insn->dst_reg, + BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le", + insn->imm, insn->dst_reg); +} + +void print_bpf_insn(const struct bpf_insn_cbs *cbs, + const struct bpf_insn *insn, + bool allow_ptr_leaks) +{ + const bpf_insn_print_t verbose = cbs->cb_print; + uint8_t class = BPF_CLASS(insn->code); + + if (class == BPF_ALU || class == BPF_ALU64) { + if (BPF_OP(insn->code) == BPF_END) { + if (class == BPF_ALU64) + verbose(cbs->private_data, "BUG_alu64_%02x\n", insn->code); + else + print_bpf_end_insn(verbose, cbs->private_data, insn); + } else if (BPF_OP(insn->code) == BPF_NEG) { + verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n", + insn->code, class == BPF_ALU ? 'w' : 'r', + insn->dst_reg, class == BPF_ALU ? 'w' : 'r', + insn->dst_reg); + } else if (BPF_SRC(insn->code) == BPF_X) { + verbose(cbs->private_data, "(%02x) %c%d %s %c%d\n", + insn->code, class == BPF_ALU ? 'w' : 'r', + insn->dst_reg, + bpf_alu_string[BPF_OP(insn->code) >> 4], + class == BPF_ALU ? 'w' : 'r', + insn->src_reg); + } else { + verbose(cbs->private_data, "(%02x) %c%d %s %d\n", + insn->code, class == BPF_ALU ? 
'w' : 'r', + insn->dst_reg, + bpf_alu_string[BPF_OP(insn->code) >> 4], + insn->imm); + } + } else if (class == BPF_STX) { + if (BPF_MODE(insn->code) == BPF_MEM) + verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = r%d\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, + insn->off, insn->src_reg); + else if (BPF_MODE(insn->code) == BPF_ATOMIC && + (insn->imm == BPF_ADD || insn->imm == BPF_AND || + insn->imm == BPF_OR || insn->imm == BPF_XOR)) { + verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) %s r%d\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, insn->off, + bpf_alu_string[BPF_OP(insn->imm) >> 4], + insn->src_reg); + } else if (BPF_MODE(insn->code) == BPF_ATOMIC && + (insn->imm == (BPF_ADD | BPF_FETCH) || + insn->imm == (BPF_AND | BPF_FETCH) || + insn->imm == (BPF_OR | BPF_FETCH) || + insn->imm == (BPF_XOR | BPF_FETCH))) { + verbose(cbs->private_data, "(%02x) r%d = atomic%s_fetch_%s((%s *)(r%d %+d), r%d)\n", + insn->code, insn->src_reg, + BPF_SIZE(insn->code) == BPF_DW ? "64" : "", + bpf_atomic_alu_string[BPF_OP(insn->imm) >> 4], + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, insn->off, insn->src_reg); + } else if (BPF_MODE(insn->code) == BPF_ATOMIC && + insn->imm == BPF_CMPXCHG) { + verbose(cbs->private_data, "(%02x) r0 = atomic%s_cmpxchg((%s *)(r%d %+d), r0, r%d)\n", + insn->code, + BPF_SIZE(insn->code) == BPF_DW ? "64" : "", + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, insn->off, + insn->src_reg); + } else if (BPF_MODE(insn->code) == BPF_ATOMIC && + insn->imm == BPF_XCHG) { + verbose(cbs->private_data, "(%02x) r%d = atomic%s_xchg((%s *)(r%d %+d), r%d)\n", + insn->code, insn->src_reg, + BPF_SIZE(insn->code) == BPF_DW ? 
"64" : "", + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, insn->off, insn->src_reg); + } else { + verbose(cbs->private_data, "BUG_%02x\n", insn->code); + } + } else if (class == BPF_ST) { + if (BPF_MODE(insn->code) == BPF_MEM) { + verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, + insn->off, insn->imm); + } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) { + verbose(cbs->private_data, "(%02x) nospec\n", insn->code); + } else { + verbose(cbs->private_data, "BUG_st_%02x\n", insn->code); + } + } else if (class == BPF_LDX) { + if (BPF_MODE(insn->code) != BPF_MEM) { + verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code); + return; + } + verbose(cbs->private_data, "(%02x) r%d = *(%s *)(r%d %+d)\n", + insn->code, insn->dst_reg, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->src_reg, insn->off); + } else if (class == BPF_LD) { + if (BPF_MODE(insn->code) == BPF_ABS) { + verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[%d]\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->imm); + } else if (BPF_MODE(insn->code) == BPF_IND) { + verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[r%d + %d]\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->src_reg, insn->imm); + } else if (BPF_MODE(insn->code) == BPF_IMM && + BPF_SIZE(insn->code) == BPF_DW) { + /* At this point, we already made sure that the second + * part of the ldimm64 insn is accessible. 
+ */ + uint64_t imm = ((uint64_t)(insn + 1)->imm << 32) | (uint32_t)insn->imm; + bool is_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD || + insn->src_reg == BPF_PSEUDO_MAP_VALUE; + char tmp[64]; + + if (is_ptr && !allow_ptr_leaks) + imm = 0; + + verbose(cbs->private_data, "(%02x) r%d = %s\n", + insn->code, insn->dst_reg, + __func_imm_name(cbs, insn, imm, + tmp, sizeof(tmp))); + } else { + verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code); + return; + } + } else if (class == BPF_JMP32 || class == BPF_JMP) { + uint8_t opcode = BPF_OP(insn->code); + + if (opcode == BPF_CALL) { + char tmp[64]; + + if (insn->src_reg == BPF_PSEUDO_CALL) { + verbose(cbs->private_data, "(%02x) call pc%s\n", + insn->code, + __func_get_name(cbs, insn, + tmp, sizeof(tmp))); + } else { + strcpy(tmp, "unknown"); + verbose(cbs->private_data, "(%02x) call %s#%d\n", insn->code, + __func_get_name(cbs, insn, + tmp, sizeof(tmp)), + insn->imm); + } + } else if (insn->code == (BPF_JMP | BPF_JA)) { + verbose(cbs->private_data, "(%02x) goto pc%+d\n", + insn->code, insn->off); + } else if (insn->code == (BPF_JMP | BPF_EXIT)) { + verbose(cbs->private_data, "(%02x) exit\n", insn->code); + } else if (BPF_SRC(insn->code) == BPF_X) { + verbose(cbs->private_data, + "(%02x) if %c%d %s %c%d goto pc%+d\n", + insn->code, class == BPF_JMP32 ? 'w' : 'r', + insn->dst_reg, + bpf_jmp_string[BPF_OP(insn->code) >> 4], + class == BPF_JMP32 ? 'w' : 'r', + insn->src_reg, insn->off); + } else { + verbose(cbs->private_data, + "(%02x) if %c%d %s 0x%x goto pc%+d\n", + insn->code, class == BPF_JMP32 ? 
'w' : 'r', + insn->dst_reg, + bpf_jmp_string[BPF_OP(insn->code) >> 4], + insn->imm, insn->off); + } + } else { + verbose(cbs->private_data, "(%02x) %s\n", + insn->code, bpf_class_string[class]); + } +} diff --git a/source/tools/detect/net_diag/rtrace/ebpf/utils/disasm.h b/source/tools/detect/net_diag/rtrace/ebpf/utils/disasm.h new file mode 100644 index 0000000000000000000000000000000000000000..ad5884b1f074aa74ed23ff7004edddeef71cb910 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/utils/disasm.h @@ -0,0 +1,41 @@ +#ifndef __RTRACE_DISASM_H +#define __RTRACE_DISASM_H + + +#include +#include +#ifndef __KERNEL__ +#include +#include +#endif + +#define __stringify_1(x...) #x +#define __stringify(x...) __stringify_1(x) + +#define __printf(a, b) __attribute__((format(printf, a, b))) + +extern const char *const bpf_alu_string[16]; +extern const char *const bpf_class_string[8]; + +const char *func_id_name(int id); + +typedef __printf(2, 3) void (*bpf_insn_print_t)(void *private_data, + const char *, ...); +typedef const char *(*bpf_insn_revmap_call_t)(void *private_data, + const struct bpf_insn *insn); +typedef const char *(*bpf_insn_print_imm_t)(void *private_data, + const struct bpf_insn *insn, + __u64 full_imm); + +struct bpf_insn_cbs { + bpf_insn_print_t cb_print; + bpf_insn_revmap_call_t cb_call; + bpf_insn_print_imm_t cb_imm; + void *private_data; +}; + +void print_bpf_insn(const struct bpf_insn_cbs *cbs, + const struct bpf_insn *insn, + bool allow_ptr_leaks); + +#endif diff --git a/source/tools/detect/net_diag/rtrace/ebpf/utils/insn.c b/source/tools/detect/net_diag/rtrace/ebpf/utils/insn.c new file mode 100644 index 0000000000000000000000000000000000000000..99eb5ef16a67ea59695279fb1b4c6b6ed3757110 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/utils/insn.c @@ -0,0 +1,94 @@ + +#include +#include +#include +#include + +#include "utils/insn.h" +#include "utils/disasm.h" + +#define SYM_MAX_NAME 256 +struct dump_data +{ + char 
scratch_buff[SYM_MAX_NAME + 8]; +}; + +uint64_t insn_get_imm(struct bpf_insn *insn) +{ + uint64_t imm = 0; + imm = insn[0].imm + ((uint64_t)insn[1].imm << 32); + return imm; +} + +void insn_set_imm(struct bpf_insn *insn, uint64_t imm) +{ + insn[0].imm = (int)((imm << 32) >> 32); + insn[1].imm = (int)(imm >> 32); +} + +static const char *print_call(void *private_data, + const struct bpf_insn *insn) +{ + struct dump_data *dd = private_data; + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), "funccall"); + return dd->scratch_buff; +} + +static void print_insn(void *private_data, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vprintf(fmt, args); + va_end(args); +} + +static const char *print_imm(void *private_data, + const struct bpf_insn *insn, + __u64 full_imm) +{ + struct dump_data *dd = private_data; + + if (insn->src_reg == BPF_PSEUDO_MAP_FD) + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "map[id:%u]", insn->imm); + else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "map[id:%u][0]+%u", insn->imm, (insn + 1)->imm); + // else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) + // snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + // "map[idx:%u]+%u", insn->imm, (insn + 1)->imm); + // else if (insn->src_reg == BPF_PSEUDO_FUNC) + // snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + // "subprog[%+d]", insn->imm); + else + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "0x%llx", (unsigned long long)full_imm); + return dd->scratch_buff; +} + +void insns_dump(struct bpf_insn *insns, int cnt) +{ + struct dump_data dd = {0}; + const struct bpf_insn_cbs cbs = { + .cb_print = print_insn, + .cb_call = print_call, + .cb_imm = print_imm, + .private_data = &dd, + }; + int i; + bool double_insn = false; + + for (i = 0; i < cnt; i++) + { + if (double_insn) + { + double_insn = false; + continue; + } + + double_insn = insns[i].code == (BPF_LD | BPF_IMM | BPF_DW); + printf("% 4d: ", 
i); + print_bpf_insn(&cbs, insns + i, true); + } +} \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/ebpf/utils/insn.h b/source/tools/detect/net_diag/rtrace/ebpf/utils/insn.h new file mode 100644 index 0000000000000000000000000000000000000000..96fb5c64c65be8d6c99985f14788cd93270c8c62 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/utils/insn.h @@ -0,0 +1,7 @@ +#ifndef __RTRACE_UTILS_INSN_H +#define __RTRACE_UTILS_INSN_H + +extern uint64_t insn_get_imm(struct bpf_insn *insn); +void insn_set_imm(struct bpf_insn *insn, uint64_t imm); +void insns_dump(struct bpf_insn *insns, int cnt); +#endif diff --git a/source/tools/detect/net_diag/rtrace/ebpf/utils/object.c b/source/tools/detect/net_diag/rtrace/ebpf/utils/object.c new file mode 100644 index 0000000000000000000000000000000000000000..8c7fedbaee6687e83fbd0fbddb603db077f572bf --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/utils/object.c @@ -0,0 +1,16 @@ + +#include +#include +#include +#include + +#include "common.usr.h" +#include "utils/object.h" + + +struct bpf_program *object_find_program(struct bpf_object *obj, int sk, int skb) +{ + char func_name[FUNCNAME_MAX_LEN]; + sprintf(func_name, "kprobe_sk%d_skb%d", sk, skb); + return bpf_object__find_program_by_name(obj, func_name); +} \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/ebpf/utils/object.h b/source/tools/detect/net_diag/rtrace/ebpf/utils/object.h new file mode 100644 index 0000000000000000000000000000000000000000..113dc0c2604593dc83430ffe09a21e98768e1d1f --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/ebpf/utils/object.h @@ -0,0 +1,6 @@ +#ifndef __RTRACE_UTILS_OBJECT_H +#define __RTRACE_UTILS_OBJECT_H + +struct bpf_program *object_find_program(struct bpf_object *obj, int sk, int skb); + +#endif \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/image/ping-delay.png b/source/tools/detect/net_diag/rtrace/image/ping-delay.png new file mode 100644 
index 0000000000000000000000000000000000000000..56f7d25b6f8fed2c6c0d62cb2ad7497aa9e639b0 Binary files /dev/null and b/source/tools/detect/net_diag/rtrace/image/ping-delay.png differ diff --git a/source/tools/detect/net_diag/rtrace/rtrace-delay/Cargo.toml b/source/tools/detect/net_diag/rtrace/rtrace-delay/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..93d4f02cc945bbbc3f5c8dca624717737d002e65 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-delay/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "rtrace-delay" +version = "0.1.0" +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = "1.0" +structopt = "0.3" +rtrace-rs = { version = "0.1.0", path = "../rtrace-rs"} +rtrace-parser = { version = "0.1.0", path = "../rtrace-parser"} +crossbeam-channel = "0.5" +log = "0.4.14" +env_logger = "0.9.0" +uname = "0.1.1" diff --git a/source/tools/detect/net_diag/rtrace/rtrace-delay/README.md b/source/tools/detect/net_diag/rtrace/rtrace-delay/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5b7921c4b2444416e96192851bd279aea8d99b1f --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-delay/README.md @@ -0,0 +1,155 @@ + +# rtrace-delay 网络抖动诊断 + +rtrace-delay是一款基于eBPF的网络抖动诊断工具,目前icmp(ping)、tcp等协议的网络抖动。 + + +## 原理简介 + +一般地,网络抖动问题定位过程包括: + +1. 确定是抖动发生在发送端还是接收端; +2. 确定是否在内核; +3. 
确定导致抖动的代码片段; + +为了确定抖动发生在发送端还是接收端,rtrace将一个完整的报文路径分成两个部分:**发送端报文路径和接收端报文路径**。 + +为了确定抖动是否发生在内核,rtrace在**内核出入口处的地方进行打点监控**。 + +为了确定导致抖动的代码片段,rtrace实现了动态打点功能,即可以随时**新增打点函数**,进而逐步缩减抖动范围。 + +接下来从这三点出发,介绍ping报文和tcp报文的抖动诊断方案。 + +### icmp(ping)抖动诊断 + +下图是ping的ICMP_ECHO和ICMP_ECHOREPLY报文路径。黑色箭头是ICMP_ECHO报文路径,红色箭头是ICMP_ECHOREPLY报文路径。 + +其中,发送端报文路径包括:ICMP_ECHO报文发送路径及ICMP_ECHOREPLY报文接收路径: + +* ICMP_ECHO报文发送路径是:raw_sendmsg->dev_hard_start_xmit->驱动; +* ICMP_ECHOREPLY报文接收路径:驱动->__netif_receive_skb_core->ping_rcv->ping_recvmsg。 + +其中,接收端报文路径包括:ICMP_ECHO报文接收路径及ICMP_ECHOREPLY报文发送路径: + +* ICMP_ECHO报文接收路径:驱动->__netif_receive_skb_core->icmp_rcv; +* ICMP_ECHOREPLY报文发送路径:dev_hard_start_xmit->驱动。 + +![ping抖动诊断默认打点路径](../image/ping-delay.png) + +### tcp抖动诊断原理 + +## 使用说明 + +使用流程: + +1. 生成配置文件:通过命令`sysak rtrace-delay --gen ./config`生成toml配置文件到config目录下; +2. 修改配置文件:根据自己的需要修改配置; +3. 运行诊断程序,可输入的参数如下: + +```shell +OPTIONS: + --config configuration file path + --delay show packet routine when delay > DELAY(ms) [default: 200] + --gen generate default configuration file + --latency latency(ms) in processing data [default: 3000] +``` + +### 配置文件 + +配置文件包含三个配置表,分别是基础配置表、过滤器配置表和函数配置表。 + +#### 基础配置表 + +`basic`包含六种键值,分别是: + +* debug:是否开启debug日志输出; +* btf_path:vmlinux btf文件的路径; +* duration:程序运行时间; +* protocol:协议类型,目前包含icmp、tcp和tcp-syn; +* recv:诊断发送端路径还是接收端路径; +* vmlinux:debuginfo中vmlinux的路径。 + +```toml +[basic] +debug = false +btf_path = "" +duration = 0 +protocol = "icmp" +recv = true +vmlinux = "" +``` + +其中,`btf_path`和`vmlinux`是可选参数。 + +目前提供了五个默认的配置文件,分别是:`ping-sender.toml`、`ping-receiver.toml`、`syn-sender.toml`、`tcp-sender.toml`和`tcp-receiver.toml`。 + +#### 过滤器配置表 + +`filter`是数组形式,单个`filter`内部过滤规则取交集,`filter`间过滤规则取并集。`filter`支持三种键值,分别是: + +* pid:进程id; +* dst:目的地址,包含ip和端口; +* src:源地址,包含ip和端口。 + +```toml +[[filter]] +pid = 0 +dst = "0.0.0.0:0" +src = "0.0.0.0:0" +``` + +#### 函数配置表 + +函数配置表中`function`是数组形式。`function`目前支持五种键值,分别是: + +* name:打点的内核函数名字; +* enable:是否使能; +* params:内置参数列表; +* exprs:动态参数表达式列表; 
* lines:指定打点行数。
config.basic.btf_path = Some(get_btf_path()); + let string = Config::to_string(&config)?; + let mut output = File::create(path)?; + write!(output, "{}", string); + Ok(()) +} + +fn gen_config_ping_receiver(path: &mut PathBuf) -> Result<()> { + let text = r#" +[basic] +debug = false +duration = 0 +protocol = "icmp" +recv = true + +[[filter]] +pid = 0 +dst = "0.0.0.0:0" +src = "0.0.0.0:0" + +[[function]] +name = "dev_hard_start_xmit" +enable = true +params = ["basic"] + +[[function]] +name = "__netif_receive_skb_core" +enable = true +params = ["basic"] + +[[function]] +name = "icmp_rcv" +enable = true +params = ["basic"] + "#; + + path.push("ping-receiver.toml"); + gen_config_common(path, &text)?; + path.pop(); + Ok(()) +} + +fn gen_config_ping_sender(path: &mut PathBuf) -> Result<()> { + let text = r#" +[basic] +debug = false +duration = 0 +protocol = "icmp" +recv = false + +[[filter]] +pid = 0 +dst = "0.0.0.0:0" +src = "0.0.0.0:0" + +[[function]] +name = "raw_sendmsg" +enable = true +params = ["basic"] + +[[function]] +name = "dev_hard_start_xmit" +enable = true +params = ["basic"] + +[[function]] +name = "__netif_receive_skb_core" +enable = true +params = ["basic"] + +[[function]] +name = "ping_rcv" +enable = true +params = ["basic"] + "#; + path.push("ping-sender.toml"); + gen_config_common(path, &text)?; + path.pop(); + Ok(()) +} + +fn gen_config_syn_sender(path: &mut PathBuf) -> Result<()> { + path.push("syn-sender.toml"); + path.pop(); + Ok(()) +} + +fn gen_config_tcp_receiver(path: &mut PathBuf) -> Result<()> { + path.push("tcp-receiver.toml"); + path.pop(); + Ok(()) +} + +fn gen_config_tcp_sender(path: &mut PathBuf) -> Result<()> { + path.push("tcp-sender.toml"); + path.pop(); + Ok(()) +} + +pub fn gen_config(path: &str) -> Result<()> { + let mut p = PathBuf::from(path); + std::fs::create_dir_all(&p)?; + gen_config_ping_receiver(&mut p)?; + gen_config_ping_sender(&mut p)?; + gen_config_syn_sender(&mut p)?; + gen_config_tcp_receiver(&mut p)?; + 
gen_config_tcp_sender(&mut p)?; + Ok(()) +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-delay/src/main.rs b/source/tools/detect/net_diag/rtrace/rtrace-delay/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..19d9679ff8bd8301a68b24cb116e6b84ed3532cb --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-delay/src/main.rs @@ -0,0 +1,95 @@ +mod gen; +mod mid; +mod syn; + +use crate::gen::gen_config; +use crate::mid::Mid; +use crate::syn::Syn; +use crossbeam_channel; +use log::*; +use rtrace_parser::func::Func; +use rtrace_parser::ksyms::ksyms_load; +use rtrace_parser::perf::{perf_inital_thread2, perf_recv_timeout}; +use rtrace_parser::utils; +use rtrace_rs::bindings::*; +use rtrace_rs::rtrace::Rtrace; +use std::path::PathBuf; +use std::time::Duration; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +#[structopt(name = "rtrace_delay", about = "Network delay diagnosis.")] +pub struct Cli { + #[structopt(long, help = "configuration file path")] + config: Option, + #[structopt(long, help = "generate default configuration file")] + gen: Option, + #[structopt(long, default_value = "3000", help = "latency(ms) in processing data")] + latency: u64, + #[structopt( + long, + default_value = "200", + help = "show packet routine when delay > DELAY(ms)" + )] + delay: u64, +} + +fn main() { + let mut cli = Cli::from_args(); + if let Some(path) = cli.gen { + gen_config(&path).expect("unable to generate config file"); + return; + } + env_logger::init(); + cli.latency = cli.latency * 1_000_000; + cli.delay = cli.delay * 1_000_000; + ksyms_load(&"/proc/kallsyms".to_owned()); + let mut rtrace; + match &cli.config { + None => { + println!("Please input config file path"); + return; + } + Some(config) => rtrace = Rtrace::from_file(config).expect("rtrace init failed"), + } + rtrace.probe_filter().expect("init filter failed"); + rtrace.probe_functions().expect("unable to trace function."); + let protocol = 
rtrace.protocol().expect("protocol not specified"); + let recv = rtrace.is_recv(); + let mut syn = Syn::new(protocol, recv); + let mut mid = Mid::new(protocol, recv); + + let (rx, tx) = crossbeam_channel::unbounded(); + perf_inital_thread2(rtrace.perf_fd(), (rx, tx)); + + let mut pre_checktimeout_ts = 0; + loop { + let res = perf_recv_timeout(Duration::from_millis(100)); + let cur_ts = utils::get_timestamp(); + match res { + Ok(data) => { + let f = Func::new(data.1); + match protocol { + Protocol::IPPROTO_ICMP | Protocol::IPPROTO_TCP => { + mid.push_func(f); + if cur_ts - pre_checktimeout_ts > 100_000_000 { + mid.check_timeout(cur_ts - cli.latency, cli.delay); + pre_checktimeout_ts = cur_ts; + } + } + Protocol::IPPROTO_TCP_SYN => { + syn.push_func(f); + if cur_ts - pre_checktimeout_ts > 100_000_000 { + syn.check_timeout(cur_ts - cli.latency, cli.delay); + pre_checktimeout_ts = cur_ts; + } + } + _ => { + panic!("rtrace_delay only support tcp or syn") + } + } + } + _ => {} + } + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-delay/src/mid.rs b/source/tools/detect/net_diag/rtrace/rtrace-delay/src/mid.rs new file mode 100644 index 0000000000000000000000000000000000000000..e63ad85d95bd827109d99ed8ed44aafd4c542a64 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-delay/src/mid.rs @@ -0,0 +1,133 @@ +use rtrace_parser::func::Func; +use rtrace_rs::rtrace::Rtrace; +use rtrace_parser::skb::{funcs_to_skbs, Skb}; +use rtrace_parser::net::Net; +use rtrace_rs::bindings::*; +use anyhow::Result; +use std::rc::Rc; +use log::*; + +pub struct Mid { + dis: Vec, + skb: Option, + ts: u64, + recv: bool, + net : Net, +} + +impl Mid { + pub fn new(protocol: Protocol, recv: bool) -> Mid { + // match protocol { + // Protocol::IPPROTO_TCP | Protocol::IPPROTO_ICMP => {} + // _ => { + // panic!("only support tcp or icmp(ping) protocol"); + // } + // } + Mid { + dis: Vec::new(), + skb: None, + ts: 0, + recv, + net: Net::new(recv), + } + } + + pub fn push_func(&mut self, 
func: Func) { + debug!("{}: {}, {:?}, {:?}", func.get_name(),func.get_ap().into_string(), func.get_seq(), func.get_rseq()); + self.net.push_func(func); + } + + pub fn check_timeout(&mut self, timeout: u64, delay: u64) -> bool { + let vec_funcs = self.net.group(timeout); + for funcs in vec_funcs { + let skbs = funcs_to_skbs(funcs, false, self.recv); + for mut skb in skbs { + let delay_tmp = skb.get_delay(); + if delay_tmp > delay { + skb.show(); + } + } + } + true + } + + // pub fn check(&mut self, f: Vec>, ts: u64) { + // let skbs = funcs_to_skbs(f, false, self.recv); + // for mut skb in skbs { + // let delay = skb.get_delay_ms(); + // if self.insert_delay(delay as usize) { + // self.skb = Some(skb); + // } + // } + + // self.show(ts); + // } + + // fn insert_delay(&mut self, delay: usize) -> bool { + // let mut larger = false; + // if delay + 1 > self.dis.len() { + // self.dis.resize(delay + 1, 0); + // larger = true; + // } + // self.dis[delay as usize] += 1; + // larger + // } + + // fn sum_dis(&self, start: usize, mut end: usize) -> u32 { + // let mut res = 0; + // end = std::cmp::min(self.dis.len(), end); + // for i in start..end { + // res += self.dis[i]; + // } + // res + // } + + // fn show_dis(&self) { + // let default_width = 10; + // let mut print = Vec::with_capacity(default_width); + // let multiple = std::cmp::max(self.dis.len() / default_width, 1); + // let mut total_cnt = 0; + // println!("DISTRIBUTION:\n"); + // for i in 0..default_width { + // let start = i * multiple; + // let end = (i + 1) * multiple; + // let res = self.sum_dis(start, end); + // total_cnt += res; + // print.push(res); + // } + // if total_cnt == 0 { + // return; + // } + // for i in 0..default_width { + // let cnt = print[i] * 50 / total_cnt; + // println!( + // "{:>5}-{:<5} {:<50} {}", + // i * multiple, + // (i + 1) * multiple, + // "*".repeat(cnt as usize), + // print[i] + // ); + // } + // } + + // fn show_skb(&self) { + // if let Some(s) = &self.skb { + // s.show(); + // } 
+ // } + + // pub fn show(&mut self, ts: u64) { + // if self.dis.len() == 0 { + // return; + // } + + // if ts - self.ts > 1_000_000_000 { + // println!("\n"); + // self.show_dis(); + // println!("\n"); + // self.show_skb(); + // println!("\n\n\n\n\n"); + // self.ts = ts; + // } + // } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-delay/src/syn.rs b/source/tools/detect/net_diag/rtrace/rtrace-delay/src/syn.rs new file mode 100644 index 0000000000000000000000000000000000000000..2e984ca1b304078aa96777014745513d95789d32 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-delay/src/syn.rs @@ -0,0 +1,75 @@ +use rtrace_parser::func::Func; +use rtrace_parser::skb::Skb; +use rtrace_rs::bindings::*; +use std::collections::HashMap; +use std::rc::Rc; +use log::*; + +struct SynInfo { + skb: Skb, + max_ts: u64, + recv: bool, +} + +impl SynInfo { + pub fn new(recv: bool) -> SynInfo { + SynInfo { + skb: Skb::new(recv), + max_ts: 0, + recv, + } + } + + pub fn push_func(&mut self, func: Func) { + self.max_ts = std::cmp::max(self.max_ts, func.get_ts()); + self.skb.push_func(Rc::new(func)); + } + + pub fn check_timeout(&mut self, timeout: u64, delay: u64) -> bool { + if self.max_ts < timeout { + if self.skb.get_delay() > delay { + self.skb.show(); + } + return true; + } + false + } +} + +pub struct Syn { + conn: HashMap, + recv: bool, +} + +impl Syn { + pub fn new(protocol: Protocol, recv: bool) -> Syn { + // match protocol { + // Protocol::IPPROTO_TCP_SYN => {} + // _ => { + // panic!("syn packet only support tcp-syn protocol"); + // } + // } + // if recv { + // panic!("syn packet diagnoise now only support send path"); + // } + Syn { + conn: HashMap::new(), + recv, + } + } + + pub fn push_func(&mut self, func: Func) { + debug!("{}: {:?}", func.get_name(),func.get_seq()); + let si = self + .conn + .entry(func.get_ap()) + .or_insert(SynInfo::new(self.recv)); + si.push_func(func); + } + + /// Process timeout data and output + pub fn check_timeout(&mut self, 
timeout: u64, delay: u64) { + self.conn + .retain(|_, value| value.check_timeout(timeout, delay) == false); + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-drop/README.md b/source/tools/detect/net_diag/rtrace/rtrace-drop/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b9268f7f0cf7823d2765425cccde12971c6f272c --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-drop/README.md @@ -0,0 +1,3 @@ +# rtrace-drop + +rtrace-parser is a network packet loss tracing and diagnosis tool. \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/Cargo.toml b/source/tools/detect/net_diag/rtrace/rtrace-parser/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..6434d13ae8fe7a4540e9b4644740e211a48e79b0 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "rtrace-parser" +version = "0.1.0" +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +libbpf-sys = { git = "https://github.com/chengshuyi/libbpf-sys" } +rtrace-rs = { version = "0.1.0", path = "../rtrace-rs"} +anyhow = "1.0" +once_cell = "1.8.0" +crossbeam-channel = "0.5" +log = "0.4.0" +nix = "0.22" diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/README.md b/source/tools/detect/net_diag/rtrace/rtrace-parser/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4c5943f3c2470066d62a3eaeae8fc4fa70683e5c --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/README.md @@ -0,0 +1,3 @@ +# rtrace-parser + +rtrace-parser is a network data parsing library, which mainly processes the data obtained by ebpf. 
\ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/src/func.rs b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/func.rs new file mode 100644 index 0000000000000000000000000000000000000000..6f965d2aa60a9102d1e5348a7b4460edbea5c2ea --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/func.rs @@ -0,0 +1,224 @@ +use crate::ksyms::ksyms_addr_to_name; +use anyhow::anyhow; +use anyhow::Result; +use rtrace_rs::bindings::*; +use std::fmt; +use std::os::raw::{c_char, c_int}; + +#[derive(Clone, Debug, Default)] +pub struct Func { + // Structure data contained in a single trace function. + // Such as basic information, context or tcp window. + name: String, + kretname: String, + data: Vec, + mask: u64, + types: Vec<*const u8>, + extra: usize, +} + +impl fmt::Display for Func { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut ty = INFO_TYPE::BASIC_INFO; + match self.get_struct(ty) { + Ok(x) => { + let bi = x as *const BASIC_INFO_struct; + unsafe { + writeln!(f, "{:?}", *bi); + } + } + _ => {} + } + write!(f, "a") + } +} + +impl Func { + pub fn new(data: Vec) -> Func { + let mut f = Func { + data: Vec::new(), + name: String::new(), + kretname: String::new(), + mask: 0, + types: Vec::with_capacity(64), + extra: 0, + }; + let mut off = 0; + f.data = data; + f.types.resize(64, std::ptr::null()); + f.mask = f.get_u64_by_off(0); + + for i in 0..64 { + if ((1 << i) & f.mask) != 0 { + let ty = INFO_TYPE::from_u32(i); + match ty { + INFO_TYPE::BASIC_INFO => { + f.types[i as usize] = &f.data[off] as *const u8; + off += ty.get_size(); + } + INFO_TYPE::CGROUP => { + f.types[i as usize] = &f.data[off] as *const u8; + off += ty.get_size(); + } + INFO_TYPE::STACK => { + f.types[i as usize] = &f.data[off] as *const u8; + off += ty.get_size(); + } + INFO_TYPE::KRETPROBE | INFO_TYPE::LINEPROBE => { + assert_eq!(f.types[INFO_TYPE::BASIC_INFO as usize], std::ptr::null()); + f.types[INFO_TYPE::BASIC_INFO as 
usize] = &f.data[off] as *const u8; + off += ty.get_size(); + } + _ => panic!("not support type"), + } + } + } + + f.extra = off; + if f.types[INFO_TYPE::BASIC_INFO as usize] != std::ptr::null() { + let bi = f.types[INFO_TYPE::BASIC_INFO as usize] as *const BASIC_INFO_struct; + unsafe { f.name = ksyms_addr_to_name((*bi).ip) }; + f.kretname = f.name.clone(); + if f.is_kretprobe() { + unsafe { f.kretname.push_str(&format!("({})", (*bi).ret)[..]) }; + } + } + + f + } + fn get_u32_by_off(&self, off: usize) -> u32 { + let ptr = &self.data[off] as *const u8 as *const u32; + unsafe { *ptr } + } + + fn get_u64_by_off(&self, off: usize) -> u64 { + let ptr = &self.data[off] as *const u8 as *const u64; + unsafe { *ptr } + } + + pub fn get_struct(&self, ty: INFO_TYPE) -> Result<*const u8> { + let ptr = self.types[ty as usize]; + if ptr == std::ptr::null() { + return Err(anyhow!("{:?} not exist", ty)); + } + Ok(ptr) + } + + pub fn get_name_no_offset(&self) -> String { + let mut name = self.name.clone(); + if let Some(x) = name.find('+') { + name.truncate(x); + } + name + } + + pub fn get_name(&self) -> &String { + &self.name + } + + pub fn get_kretname(&self) -> &String { + &self.kretname + } + + pub fn get_ts(&self) -> u64 { + let bi = self.get_struct(INFO_TYPE::BASIC_INFO).unwrap() as *const BASIC_INFO_struct; + unsafe { (*bi).ts } + } + + pub fn is_send(&self) -> bool { + (self.get_ts() & 0x1) == 1 + } + + pub fn is_kretprobe(&self) -> bool { + (self.mask & (1 << INFO_TYPE::KRETPROBE as u64)) != 0 + } + + pub fn get_seq(&self) -> (usize, usize) { + let bi = self.get_struct(INFO_TYPE::BASIC_INFO).unwrap() as *const BASIC_INFO_struct; + unsafe { + let seq = (*bi).seq as usize; + let end_seq = (*bi).end_seq as usize; + (seq, end_seq) + } + } + + pub fn get_rseq(&self) -> (usize, usize) { + let bi = self.get_struct(INFO_TYPE::BASIC_INFO).unwrap() as *const BASIC_INFO_struct; + unsafe { + let rseq = (*bi).rseq as usize; + let rend_seq = (*bi).rend_seq as usize; + (rseq, rend_seq) 
+ } + } + + pub fn get_ap(&self) -> addr_pair { + let bi = self.get_struct(INFO_TYPE::BASIC_INFO).unwrap() as *const BASIC_INFO_struct; + unsafe { (*bi).ap } + } + + /// extra mean data of expression statement. + pub fn get_extra(&self, off: usize) -> *const u8 { + &self.data[off + self.extra] as *const u8 + } + + pub fn show_brief(&self) { + // println!( + // "func: {}, seq: {:?}, rseq: {:?}, ts: {}", + // self.get_func_name_with_kret(), + // self.get_seq(), + // self.get_rseq(), + // self.get_ts() + // ); + } + + pub fn get_stack_string(&self) -> Result { + let st = self.get_struct(INFO_TYPE::STACK)? as *const STACK_struct; + let mut vec_str = Vec::new(); + for i in 0..5 { + let mut tmp = unsafe {ksyms_addr_to_name((*st).kern_stack[i])}; + tmp.insert(0, '\t'); + vec_str.push(tmp); + } + Ok(format!("{}", vec_str.join("\n"))) + } + + pub fn show_stack(&self) { + unsafe { println!("{}\n", self.get_stack_string().unwrap()) }; + } + + pub fn show(&self) { + // for (i, item) in self.data.iter().enumerate() { + // let typ = get_type_from_ptr(*item); + // println!("{}: {:?}", i, typ); + // match get_type_from_ptr(*item) { + // INFO_TYPE::BASIC_INFO => { + // let bi = *item as *const BASIC_INFO_struct; + // unsafe { + // println!("{:#?}", *bi); + // } + // } + // INFO_TYPE::CONTEXT => { + // let ct = *item as *const CONTEXT_struct; + // unsafe { + // println!("{:#?}", *ct); + // } + // } + // INFO_TYPE::MEMORY => { + // let mm = *item as *const MEMORY_struct; + // unsafe { + // println!("{:#?}", *mm); + // } + // } + // INFO_TYPE::TCP_WINDOW => { + // let tw = *item as *const TCP_WINDOW_struct; + // unsafe { + // println!("{:#?}", *tw); + // } + // } + // _ => { + // panic!("Unknown format\n"); + // } + // } + // } + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/src/ksyms.rs b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/ksyms.rs new file mode 100644 index 0000000000000000000000000000000000000000..a5b0a054c5be70f043316f6e34fa56aa405eb8c4 --- 
/dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/ksyms.rs @@ -0,0 +1,116 @@ +use anyhow::Result; +use once_cell::sync::Lazy; +use std::fs::File; +use std::io::{self, BufRead}; +use std::sync::Mutex; +use log::*; +#[derive(Debug, Default)] +pub struct Ksyms { + syms: Vec<(String, u64)>, +} + +impl Ksyms { + pub fn new() -> Self { + Ksyms { syms: Vec::new() } + } + + pub(crate) fn insert(&mut self, sym_name: String, sym_addr: u64) { + self.syms.push((sym_name, sym_addr)); + } + + pub(crate) fn get_ksyms_num(&self) -> usize { + self.syms.len() + } + + pub fn load(&mut self, filename: &String) -> Result<()> { + self.syms.clear(); + let file = File::open(filename)?; + let lines = io::BufReader::new(file).lines(); + for line in lines { + if let Ok(l) = line { + let mut iter = l.trim().split_whitespace(); + if let Some(x) = iter.next() { + iter.next(); + if let Some(y) = iter.next() { + self.insert(y.to_string(), u64::from_str_radix(x, 16).unwrap()); + } + } + } + } + self.syms.sort_by(|a, b| a.1.cmp(&b.1)); + debug!( + "Load ksyms done from {:?}, symbols length: {}", + filename, + self.syms.len() + ); + Ok(()) + } + + pub fn addr_to_name(&self, addr: u64) -> String { + let mut start = 0; + let mut end = self.syms.len() - 1; + let mut mid; + let mut sym_addr; + + while start < end { + mid = start + (end - start + 1) / 2; + sym_addr = self.syms[mid].1; + + if sym_addr <= addr { + start = mid; + } else { + end = mid - 1; + } + } + + if start == end && self.syms[start].1 <= addr { + let mut name = self.syms[start].0.clone(); + name.push_str(&format!("+{}", addr - self.syms[start].1 - 1)); + return name; + } + + return String::from("Not Found"); + } +} + +/// +static GLOBAL_KSYMS: Lazy> = Lazy::new(|| { + let ksyms = Ksyms::new(); + Mutex::new(ksyms) +}); + +/// load all kernel symbols +pub fn ksyms_load(filename: &String) { + GLOBAL_KSYMS.lock().unwrap().load(filename).unwrap(); +} + +/// Convert the kernel symbol address to the form of function name + 
offset +pub fn ksyms_addr_to_name(addr: u64) -> String { + GLOBAL_KSYMS.lock().unwrap().addr_to_name(addr) +} + +#[cfg(test)] +mod tests { + + use super::*; + #[test] + fn test_ksyms_load() { + let mut ksym = Ksyms::new(); + let err = ksym.load(&PathBuf::from("/proc/kallsyms")); + assert_eq!(err.is_ok(), true); + let pre_len = ksym.get_ksyms_num(); + + let err = ksym.load(&PathBuf::from("/3124/2123")); + assert_eq!(err.is_ok(), false); + let aft_len = ksym.get_ksyms_num(); + assert_ne!(pre_len, aft_len); + } + + #[test] + fn test_ksyms_search() { + let mut ksym = Ksyms::new(); + ksym.insert(String::from("test3"), 3); + ksym.insert(String::from("test1"), 1); + ksym.insert(String::from("test2"), 2); + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/src/lib.rs b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d388c6d37c41c19082780dd8b708ec04fe7dce5d --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/lib.rs @@ -0,0 +1,8 @@ +pub mod func; +pub mod ksyms; +pub mod net; +pub mod perf; +pub mod skb; +pub mod sock; +pub mod utils; + diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/src/net.rs b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/net.rs new file mode 100644 index 0000000000000000000000000000000000000000..98cbe9be68056955e2526b99c8274d54d3dffa6a --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/net.rs @@ -0,0 +1,53 @@ +use crate::func::Func; +use crate::sock::Sock; +use anyhow::Result; +use log::*; +use rtrace_rs::bindings::*; +use std::collections::HashMap; +use std::rc::Rc; + +pub struct Net { + sockmap: HashMap, + funcs: Vec, + recv: bool, +} + +impl Net { + pub fn new(recv: bool) -> Net { + let mut n = Net { + sockmap: HashMap::new(), + funcs: Vec::new(), + recv, + }; + n + } + + // 1. Get the network quadruple according to Func + // 2. Get sock + // 3. 
Send func to the sock + pub fn push_func(&mut self, func: Func) { + // if self.recv { + // println!("name: {}, seq:{:?}", func.get_name(), func.get_rseq()); + // } else { + // println!("name: {}, seq:{:?}", func.get_name(), func.get_seq()); + // } + // unsafe {println!("{:?}", (*(func.get_struct(INFO_TYPE::BASIC_INFO).unwrap() as *const BASIC_INFO_struct)).into_string());} + let ap = func.get_ap(); + let sk = self.sockmap.entry(ap).or_insert(Sock::new(ap, self.recv)); + if log_enabled!(Level::Info) { + func.show_brief(); + } + sk.push_func(func); + } + + pub fn group(&mut self, max_ts: u64) -> Vec>> { + let mut res = Vec::new(); + for (_, sk) in &mut self.sockmap { + let tmp = sk.group_funcs(max_ts); + if tmp.len() != 0 { + res.push(tmp); + } + } + res + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/src/perf.rs b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/perf.rs new file mode 100644 index 0000000000000000000000000000000000000000..dd5b0ecae17dd724644567430798dcf29f6ee42b --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/perf.rs @@ -0,0 +1,280 @@ +use anyhow::anyhow; +use anyhow::Result; +use core::ffi::c_void; +use libbpf_sys; +use std::boxed::Box; +use std::slice; +use std::time::Duration; +use log::*; +use once_cell::sync::Lazy; +use std::sync::Arc; +use std::sync::Mutex; +use std::thread; +use crossbeam_channel::{Sender, Receiver}; + +fn is_power_of_two(i: usize) -> bool { + i > 0 && (i & (i - 1)) == 0 +} + +// Workaround for `trait_alias` +// (https://doc.rust-lang.org/unstable-book/language-features/trait-alias.html) +// not being available yet. This is just a custom trait plus a blanket implementation. 
+pub trait SampleCb: FnMut(i32, &[u8]) + 'static {} +impl SampleCb for T where T: FnMut(i32, &[u8]) + 'static {} + +pub trait LostCb: FnMut(i32, u64) + 'static {} +impl LostCb for T where T: FnMut(i32, u64) + 'static {} + +struct CbStruct { + sample_cb: Option>, + lost_cb: Option>, +} + +/// Builds [`PerfBuffer`] instances. +pub struct PerfBufferBuilder { + mapfd: i32, + pages: usize, + sample_cb: Option>, + lost_cb: Option>, +} + +impl PerfBufferBuilder { + pub fn new(mapfd: i32) -> Self { + Self { + mapfd, + pages: 128, + sample_cb: None, + lost_cb: None, + } + } +} + +impl PerfBufferBuilder { + /// Callback to run when a sample is received. + /// + /// This callback provides a raw byte slice. You may find libraries such as + /// [`plain`](https://crates.io/crates/plain) helpful. + /// + /// Callback arguments are: `(cpu, data)`. + pub fn sample_cb(self, cb: NewCb) -> PerfBufferBuilder { + PerfBufferBuilder { + mapfd: self.mapfd, + pages: self.pages, + sample_cb: Some(Box::new(cb)), + lost_cb: self.lost_cb, + } + } + + /// Callback to run when a sample is received. + /// + /// Callback arguments are: `(cpu, lost_count)`. + pub fn lost_cb(self, cb: NewCb) -> PerfBufferBuilder { + PerfBufferBuilder { + mapfd: self.mapfd, + pages: self.pages, + sample_cb: self.sample_cb, + lost_cb: Some(Box::new(cb)), + } + } + + /// The number of pages to size the ring buffer. 
+ pub fn pages(&mut self, pages: usize) -> &mut Self { + self.pages = pages; + self + } + + pub fn build(self) -> Result { + if !is_power_of_two(self.pages) { + return Err(anyhow!("Page count must be power of two")); + } + + let c_sample_cb: libbpf_sys::perf_buffer_sample_fn = if self.sample_cb.is_some() { + Some(Self::call_sample_cb) + } else { + None + }; + + let c_lost_cb: libbpf_sys::perf_buffer_lost_fn = if self.lost_cb.is_some() { + Some(Self::call_lost_cb) + } else { + None + }; + + let callback_struct_ptr = Box::into_raw(Box::new(CbStruct { + sample_cb: self.sample_cb, + lost_cb: self.lost_cb, + })); + + let ptr = unsafe { + libbpf_sys::perf_buffer__new( + self.mapfd, + self.pages as libbpf_sys::size_t, + c_sample_cb, + c_lost_cb, + callback_struct_ptr as *mut _, + std::ptr::null(), + ) + }; + let err = unsafe { libbpf_sys::libbpf_get_error(ptr as *const _) }; + if err != 0 { + Err(anyhow!("Unable to create perf buffer")) + } else { + Ok(PerfBuffer { + ptr, + _cb_struct: unsafe { Box::from_raw(callback_struct_ptr) }, + }) + } + } + + unsafe extern "C" fn call_sample_cb(ctx: *mut c_void, cpu: i32, data: *mut c_void, size: u32) { + let callback_struct = ctx as *mut CbStruct; + + if let Some(cb) = &mut (*callback_struct).sample_cb { + cb(cpu, slice::from_raw_parts(data as *const u8, size as usize)); + } + } + + unsafe extern "C" fn call_lost_cb(ctx: *mut c_void, cpu: i32, count: u64) { + let callback_struct = ctx as *mut CbStruct; + + if let Some(cb) = &mut (*callback_struct).lost_cb { + cb(cpu, count); + } + } +} + +/// Represents a special kind of [`Map`]. Typically used to transfer data between +/// [`Program`]s and userspace. 
+pub struct PerfBuffer { + pub ptr: *mut libbpf_sys::perf_buffer, + // Hold onto the box so it'll get dropped when PerfBuffer is dropped + _cb_struct: Box, +} + +unsafe impl Send for PerfBuffer {} + +impl PerfBuffer { + pub fn poll(&self, timeout: Duration) -> Result<()> { + let ret = unsafe { libbpf_sys::perf_buffer__poll(self.ptr, timeout.as_millis() as i32) }; + if ret < 0 { + Err(anyhow!("Err({}) occurs on perf poll", ret)) + } else { + Ok(()) + } + } +} + +impl Drop for PerfBuffer { + fn drop(&mut self) { + unsafe { + libbpf_sys::perf_buffer__free(self.ptr); + } + } +} + +static GLOBAL_TX: Lazy)>>>> = + Lazy::new(|| Mutex::new(None)); + +static GLOBAL_RX: Lazy)>>>> = + Lazy::new(|| Mutex::new(None)); + +fn handle_lost_events(cpu: i32, count: u64) { + error!("Lost {} events on CPU {}", count, cpu); +} + +fn handle_event(_cpu: i32, data: &[u8]) { + let event = Vec::from(data); + GLOBAL_TX + .lock() + .unwrap() + .as_ref() + .unwrap() + .send((_cpu as usize, event)) + .unwrap(); +} + +fn thread_perf_handle(fd: i32) { + if fd < 0 { + return; + } + + let perf = Arc::new(Mutex::new( + PerfBufferBuilder::new(fd) + .sample_cb(handle_event) + .lost_cb(handle_lost_events) + .build() + .unwrap(), + )); + let clone_perf = perf.clone(); + + thread::spawn(move || loop { + unsafe { + libbpf_sys::perf_buffer__consume(perf.lock().unwrap().ptr); + } + thread::sleep(Duration::from_millis(100)); + }); + + loop { + clone_perf + .lock() + .unwrap() + .poll(Duration::from_millis(100)) + .unwrap(); + } +} + +pub fn perf_inital_thread(fd: i32) { + let (tx, rx) = crossbeam_channel::unbounded(); + *GLOBAL_TX.lock().unwrap() = Some(tx); + *GLOBAL_RX.lock().unwrap() = Some(rx); + thread::spawn(move || thread_perf_handle(fd)); +} + +pub fn perf_inital_thread2(fd: i32, cs: (Sender<(usize, Vec)>, Receiver<(usize, Vec)>)) { + *GLOBAL_TX.lock().unwrap() = Some(cs.0); + *GLOBAL_RX.lock().unwrap() = Some(cs.1); + thread::spawn(move || thread_perf_handle(fd)); +} + +pub fn perf_recv() -> (usize, 
Vec) { + GLOBAL_RX.lock().unwrap().as_ref().unwrap().recv().unwrap() +} + +pub fn perf_recv_timeout( + timeout: Duration, +) -> std::result::Result<(usize, Vec), crossbeam_channel::RecvTimeoutError> { + GLOBAL_RX + .lock() + .unwrap() + .as_ref() + .unwrap() + .recv_timeout(timeout) +} + + +#[cfg(test)] +mod tests { + use super::*; + + fn is_power_of_two_slow(i: usize) -> bool { + if i == 0 { + return false; + } + + let mut n = i; + while n > 1 { + if n & 0x01 as usize == 1 { + return false; + } + n >>= 1; + } + true + } + + #[test] + fn test_is_power_of_two() { + for i in 0..=256 { + assert_eq!(is_power_of_two(i), is_power_of_two_slow(i)); + } + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/src/skb.rs b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/skb.rs new file mode 100644 index 0000000000000000000000000000000000000000..bdb0dfde81fd227e894fae1fb3212307503e9716 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/skb.rs @@ -0,0 +1,270 @@ +use crate::func::Func; +use crate::sock::Sock; +use crate::utils::*; +use rtrace_rs::bindings::*; +use std::collections::HashMap; +use std::rc::Rc; + +#[derive(Debug, Default)] +pub struct Skb { + min_seq: usize, + end_seq: usize, + delay: u64, + funcs: Vec>, + max_ts: u64, + recv: bool, +} + +impl Skb { + pub fn new(recv: bool) -> Skb { + Skb { + min_seq: usize::MAX, + end_seq: 0, + delay: u64::MAX, + funcs: Vec::new(), + max_ts: 0, + recv, + } + } + + pub fn funcs_len(&self) -> usize { + self.funcs.len() + } + + pub fn push_func(&mut self, func: Rc) { + let tmp; + if self.recv { + tmp = func.get_rseq(); + } else { + tmp = func.get_seq(); + } + if tmp.0 != 0 { + self.min_seq = std::cmp::min(self.min_seq, tmp.0); + } + if tmp.0 == 0 && self.min_seq == usize::MAX { + self.min_seq = 0; + } + self.max_ts = std::cmp::max(self.max_ts, func.get_ts()); + self.funcs.push(func); + } + + pub fn from_funcs(funcs: Vec>, recv: bool) -> Skb { + let mut skb = Skb::new(recv); + for func in 
funcs { + skb.push_func(func); + } + skb + } + + pub fn get_funcs_by_name(&self, name: &String) -> Vec> { + let mut funcs = Vec::new(); + for func in &self.funcs { + if name.eq(func.get_name()) { + funcs.push(func.clone()); + } + } + funcs + } + + pub fn get_delay(&mut self) -> u64 { + if self.delay == u64::MAX { + self.funcs.sort_by(|a, b| a.get_ts().cmp(&b.get_ts())); + self.delay = self.funcs.last().unwrap().get_ts() - self.funcs.first().unwrap().get_ts(); + } + self.delay + } + + pub fn get_delay_ms(&mut self) -> u64 { + let delay = self.get_delay(); + delay / 1000_000 + } + + pub fn get_max_ts(&self) -> u64 { + self.max_ts + } + + pub fn show(&self) { + if self.funcs.len() == 0 { + return; + } + println!("FUNCTION DELAY: {}\n", self.funcs[0].get_ap().into_string()); + let mut row = (self.funcs.len() as f64).sqrt() as usize; + if row * row < self.funcs.len() { + row += 1; + } + let index_table = get_index_table(row); + + for i in 0..row { + // first line + for j in 0..row { + let index = index_table[i][j]; + if index >= self.funcs.len() { + continue; + } + + let mut seq; + if self.recv { + seq = self.funcs[index].get_rseq(); + if seq.0 == 0 { + seq.0 = self.min_seq; + } + } else { + seq = self.funcs[index].get_seq(); + } + let name = self.funcs[index].get_kretname(); + // let ts = self.funcs[index].get_ts(); + + if i != 0 && i != row - 1 && j != 0 { + print!("{:^10}", " "); + } + + if i == 0 && j != 0 { + if j % 2 == 0 { + let ts = self.funcs[index].get_ts() - self.funcs[index - 1].get_ts(); + let tmp_str = format!("→{}us→", ts / 1000); + print!("{:^10}", tmp_str) + } else { + print!("{:^10}", " "); + } + } + + if i == row - 1 && j != 0 { + if j % 2 == 1 { + let ts = self.funcs[index].get_ts() - self.funcs[index - 1].get_ts(); + let tmp_str = format!("→{}us→", ts / 1000); + print!("{:^10}", tmp_str); + } else { + print!("{:^10}", " "); + } + } + let tmp_str = format!( + "({},{}){}", + seq.0 - self.min_seq, + seq.1 - self.min_seq, + name + ); + print!("{:^30}", 
tmp_str); + } + println!(""); + if i != row - 1 { + // second line + for j in 0..row { + let index = index_table[i][j]; + if index >= self.funcs.len() { + continue; + } + if j % 2 == 0 { + print!("{:^30}", "↓"); + } else { + print!("{:^30}", "↑"); + } + print!("{:^10}", " "); + } + println!(""); + // thrid line + for j in 0..row { + let index = index_table[i][j]; + let nxt_index = index_table[i + 1][j]; + if index >= self.funcs.len() || nxt_index >= self.funcs.len() { + continue; + } + let ts = self.funcs[index].get_ts(); + let nxt_ts = self.funcs[nxt_index].get_ts(); + let val; + if ts < nxt_ts { + val = nxt_ts - ts; + } else { + val = ts - nxt_ts; + } + let tmp_str = format!("{}us", val / 1000); + print!("{:^30}", tmp_str); + print!("{:^10}", " "); + } + println!(""); + // fourth line + for j in 0..row { + let index = index_table[i][j]; + if index >= self.funcs.len() { + continue; + } + if j % 2 == 0 { + print!("{:^30}", "↓"); + } else { + print!("{:^30}", "↑"); + } + print!("{:^10}", " "); + } + println!(""); + } + } + println!("\n"); + } + + pub fn show_brief(&self) {} +} + +pub fn funcs_to_skbs(funcs: Vec>, raw: bool, recv: bool) -> Vec { + let mut skbs = Vec::new(); + if raw { + skbs.push(Skb::from_funcs(funcs, recv)); + } else { + let mut seqs = Vec::new(); + if recv { + for func in &funcs { + let (sseq, eseq) = func.get_rseq(); + if sseq != 0 { + seqs.push(sseq); + } + seqs.push(eseq); + } + } else { + for func in &funcs { + let (sseq, eseq) = func.get_seq(); + seqs.push(sseq); + seqs.push(eseq); + } + } + seqs.sort_unstable(); + seqs.dedup(); + + if seqs.len() == 1 { + seqs.push(seqs[0]); + } + + for i in 1..seqs.len() { + let mut skb = Skb::new(recv); + if recv { + for func in &funcs { + let (sseq, eseq) = func.get_rseq(); + if seqs[i - 1] >= sseq && seqs[i] <= eseq { + skb.push_func(func.clone()); + } + } + } else { + for func in &funcs { + let (sseq, eseq) = func.get_seq(); + if seqs[i - 1] >= sseq && seqs[i] <= eseq { + skb.push_func(func.clone()); + } 
+ } + } + if skb.funcs_len() != 0 { + skbs.push(skb); + } + } + } + + skbs +} + +// if self.skb_raw { +// skbs.push(Skb::from_vec(funcs, true)); +// } else { +// for func in funcs { +// let (sseq, eseq) = func.get_seq(); +// seqs.push(sseq); +// seqs.push(eseq); +// } +// seqs.sort_unstable(); +// seqs.dedup(); +// } diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/src/sock.rs b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/sock.rs new file mode 100644 index 0000000000000000000000000000000000000000..281478d0b957fe445f52025f5e427ddf4a87b4d5 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/sock.rs @@ -0,0 +1,140 @@ +use crate::func::Func; +use anyhow::Result; +use log::*; +use rtrace_rs::bindings::*; +use std::collections::BTreeMap; +use std::rc::Rc; + +#[derive(Debug)] +pub struct Sock { + ap: addr_pair, + seq: Vec, + + max_send_seq: usize, + max_recv_seq: usize, + // usize: Max end seq + // u64: max ts + send: BTreeMap>, usize, u64)>, + recv: BTreeMap>, usize, u64)>, + recv_path: bool, +} + +fn bm_insert( + bm: &mut BTreeMap>, usize, u64)>, + seq: (usize, usize), + func: Rc, +) { + let mut item; + if seq.0 == 0 { + item = bm.entry(seq.1 - 1).or_insert((Vec::new(), 0, 0)); + } else { + item = bm.entry(seq.0).or_insert((Vec::new(), 0, 0)); + } + (*item).1 = std::cmp::max((*item).1, seq.1); + (*item).2 = std::cmp::max((*item).2, func.get_ts()); + (*item).0.push(func); +} + +fn bm_group(bm: &mut BTreeMap>, usize, u64)>, max_ts: u64) -> Vec> { + let mut funcs = Vec::new(); + let mut keys = Vec::new(); + let mut max_end_seq = 0; + + // println!("bm_group: bmap len is {}, max ts is {}", bm.len(), max_ts); + for (key, value) in bm.iter() { + if value.2 > max_ts { + // println!("{} {}", value.2, max_ts); + return funcs; + } + + if max_end_seq == 0 { + max_end_seq = value.1; + keys.push(*key); + continue; + } + + if *key >= max_end_seq { + // All func data from min_start_seq to max_end_seq have been found. 
+ // And the key is not included. + break; + } else { + max_end_seq = std::cmp::max(max_end_seq, value.1); + } + // println!( + // "seq ({}, {}), max end seq: {}, max_ts: {}", + // *key, value.1, max_end_seq, value.2 + // ); + keys.push(*key); + } + + for key in keys { + let val = bm.remove(&key); + if let Some(v) = val { + funcs.extend(v.0); + } + } + // println!("funcs len is {}", funcs.len()); + funcs +} + +impl Sock { + pub fn new(ap: addr_pair, recv: bool) -> Sock { + Sock { + ap: ap, + max_send_seq: 0, + max_recv_seq: 0, + send: BTreeMap::new(), + recv: BTreeMap::new(), + seq: Vec::default(), + recv_path: recv, + } + } + + // 1. get avaliable max ts. + // 2. Call the bm_group function to get the func list, + // the func in the list meets the maximum timestamp less than tmp_max_ts, + // and the maximum end seq no longer appears in subsequent func. + // 3. Call push_skb, pending subsequent processing. + pub fn group_funcs(&mut self, max_ts: u64) -> Vec> { + if self.recv_path { + bm_group(&mut self.recv, max_ts) + } else { + bm_group(&mut self.send, max_ts) + } + } + + // 1. insert func into btreemap + // 2. update max ts + // 2. 
try to build one skb + pub fn push_func(&mut self, func: Func) { + let rc_func = Rc::new(func); + if self.recv_path { + let (rseq, rend_seq) = rc_func.get_rseq(); + bm_insert(&mut self.recv, (rseq, rend_seq), rc_func.clone()); + } else { + let (seq, end_seq) = rc_func.get_seq(); + bm_insert(&mut self.send, (seq, end_seq), rc_func.clone()); + } + } + + pub fn get_ap(&self) -> addr_pair { + self.ap + } + + pub fn get_rap(&self) -> addr_pair { + addr_pair { + saddr: self.ap.daddr, + sport: u16::from_be(self.ap.dport), + daddr: self.ap.saddr, + dport: self.ap.sport.to_be(), + } + } +} + +#[cfg(test)] +mod sock_tests { + use super::*; + + #[test] + fn bm_test() {} +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-parser/src/utils.rs b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..e49e1a6cf51d16c457a05eecdf07790f45dcaa8f --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-parser/src/utils.rs @@ -0,0 +1,92 @@ +use anyhow::anyhow; +use anyhow::Result; +use std::ffi::{CStr, CString}; +use std::os::raw::c_char; +use std::path::Path; +use nix::libc; + +pub fn str_to_cstring(s: &str) -> Result { + CString::new(s).map_err(|e| anyhow!(e.to_string())) +} + +pub fn path_to_cstring>(path: P) -> Result { + let path_str = path + .as_ref() + .to_str() + .ok_or_else(|| anyhow!(format!("{} is not valid unicode", path.as_ref().display())))?; + + str_to_cstring(path_str) +} + +pub fn c_ptr_to_string(p: *const c_char) -> Result { + if p.is_null() { + return Err(anyhow!("Null string".to_owned())); + } + + let c_str = unsafe { CStr::from_ptr(p) }; + Ok(c_str + .to_str() + .map_err(|e| anyhow!(e.to_string()))? 
/// Builds a `row x row` table of display indices laid out in
/// boustrophedon ("snake") order, used to render function-flow diagrams.
/// Column 0 runs top-down, column 1 bottom-up, and so on; e.g. for
/// `row == 4` the first three columns are:
///
///   0 7 8
///   1 6 9
///   2 5 10
///   3 4
pub fn get_index_table(row: usize) -> Vec<Vec<usize>> {
    // vec! replaces the original manual push loops (which triggered
    // unused-variable warnings for their loop counters).
    let mut index_table = vec![vec![0usize; row]; row];

    let mut index = 0;
    for i in 0..row {
        // Alternating step sizes: walking down the next column vs.
        // jumping back up; both depend on the starting row `i`.
        // (Computed inside the loop so `row == 0` cannot underflow.)
        let add1 = (row * 2 - 1) - i * 2;
        let add2 = row * 2 - add1;
        let mut switch = false;
        for j in 0..row {
            if j == 0 {
                index = i;
            } else if switch {
                index += add1;
            } else {
                index += add2;
            }
            switch = !switch;
            index_table[i][j] = index;
        }
    }
    index_table
}
0000000000000000000000000000000000000000..a786515cf399f55b3e4e8c8df31dfac1f3176226 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/README.md @@ -0,0 +1,3 @@ +# rtrace-rs + +rtrace-rs is the rust package of the rtrace ebpf library. On this basis, it provides four core functions, namely dynamic tracing, builtin parameters, dynamic parameters and filtering. \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/build.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..df678c2ed6fa2c6e86d73f4302ca9a4cd6d39211 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/build.rs @@ -0,0 +1,12 @@ +use std::path::PathBuf; +fn main() { + let libpath = PathBuf::from(env!("OBJ_LIB_PATH")); + let mut librtrace_path = libpath.clone(); + librtrace_path.push("librtrace.a"); + + println!("cargo:rerun-if-changed={}", librtrace_path.display()); + + println!("cargo:rustc-link-search={}", libpath.display()); + println!("cargo:rustc-link-lib=static=rtrace"); +} + \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/bindings.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/bindings.rs new file mode 100644 index 0000000000000000000000000000000000000000..879a5a4aa2bb1a06840f7f98186b4051bf50d684 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/bindings.rs @@ -0,0 +1,261 @@ +use anyhow::anyhow; +use anyhow::Result; +use libbpf_sys::{bpf_insn, bpf_program, btf, size_t}; +use std::net::{Ipv4Addr, SocketAddrV4}; +use std::os::raw::{c_char, c_int}; + +#[derive(PartialEq, Debug, Copy, Clone)] +#[repr(u32)] +#[allow(non_camel_case_types)] +pub enum Protocol { + IPPROTO_ICMP = 1, + IPPROTO_TCP = 6, + IPPROTO_UDP = 17, + + IPPROTO_TCP_SYN = (1 << 8) + 6, +} + +impl Protocol { + pub fn from_string(protocol: &String) -> Result { + match &protocol[..] 
{ + "icmp" => Ok(Protocol::IPPROTO_ICMP), + "tcp" => Ok(Protocol::IPPROTO_TCP), + "udp" => Ok(Protocol::IPPROTO_UDP), + "tcp-syn" => Ok(Protocol::IPPROTO_TCP_SYN), + _ => Err(anyhow!("could not parse protocol type")), + } + } + + pub fn into_str(ty: &Protocol) -> &str { + match ty { + Protocol::IPPROTO_ICMP => "icmp", + Protocol::IPPROTO_TCP => "tcp", + Protocol::IPPROTO_UDP => "udp", + Protocol::IPPROTO_TCP_SYN => "tcp-syn", + _ => "unknown", + } + } +} + +#[allow(non_camel_case_types)] +#[derive(PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord, Debug)] +#[repr(C)] +pub struct addr_pair { + pub saddr: u32, + pub daddr: u32, + pub sport: u16, + pub dport: u16, +} + +impl addr_pair { + pub fn from_string(src: &String, dst: &String) -> Result { + let s: SocketAddrV4 = src.parse()?; + let d: SocketAddrV4 = dst.parse()?; + Ok(addr_pair { + saddr: u32::from_le_bytes(s.ip().octets()), + daddr: u32::from_le_bytes(d.ip().octets()), + sport: s.port(), + dport: d.port(), + }) + } + + pub fn into_string(&self) -> String { + format!( + "{} - {}", + SocketAddrV4::new(Ipv4Addr::from(u32::from_be(self.saddr)), self.sport), + SocketAddrV4::new( + Ipv4Addr::from(u32::from_be(self.daddr)), + self.dport + ) + ) + } +} + +#[allow(non_camel_case_types)] +#[derive(Default, Copy, Clone, Debug)] +#[repr(C)] +pub struct pid_info { + pub pid: u32, + pub comm: [u8; 16], +} + +#[allow(non_camel_case_types)] +#[repr(C)] +#[derive(Debug)] +pub struct BASIC_INFO_struct { + pub mask: u64, + pub ip: u64, + pub ts: u64, + pub seq: u32, + pub end_seq: u32, + pub rseq: u32, + pub rend_seq: u32, + pub ap: addr_pair, + pub pi: pid_info, + pub ret: u64, +} + +#[allow(non_camel_case_types)] +#[derive(Debug, Default, Copy, Clone)] +#[repr(C)] +pub struct CGROUP_struct { + pub inum: u32, + __pad_4: [u8; 4], + pub cgroupid: u64, +} + +#[allow(non_camel_case_types)] +#[derive(Default, Copy, Clone)] +#[repr(C)] +pub struct STACK_struct { + pub kern_stack: [u64; 5], +} + +#[allow(non_camel_case_types)] 
+#[derive(PartialEq, Debug, Copy, Clone)] +#[repr(u32)] +pub enum INFO_TYPE { + BASIC_INFO = 0, + CGROUP, + STACK, + KRETPROBE, // Get the return parameter of the function + LINEPROBE, + ENUM_END, +} + +impl INFO_TYPE { + pub fn from_string(string: &String) -> Result { + match string.as_str() { + "basic" => Ok(INFO_TYPE::BASIC_INFO), + "cgroup" => Ok(INFO_TYPE::CGROUP), + "stack" => Ok(INFO_TYPE::STACK), + "kretprobe" => Ok(INFO_TYPE::KRETPROBE), + "lineprobe" => Ok(INFO_TYPE::LINEPROBE), + _ => Err(anyhow!("{} -> INFO_TYPE not support", string)), + } + } + + pub fn from_u32(value: u32) -> INFO_TYPE { + match value { + 0 => INFO_TYPE::BASIC_INFO, + 1 => INFO_TYPE::CGROUP, + 2 => INFO_TYPE::STACK, + 3 => INFO_TYPE::KRETPROBE, + 4 => INFO_TYPE::LINEPROBE, + 5 => INFO_TYPE::ENUM_END, + _ => panic!("Unknown value: {}", value), + } + } + + pub fn get_size(&self) -> usize { + let sz; + match self { + INFO_TYPE::BASIC_INFO => sz = std::mem::size_of::(), + INFO_TYPE::CGROUP => sz = std::mem::size_of::(), + INFO_TYPE::STACK => sz = std::mem::size_of::(), + INFO_TYPE::KRETPROBE => sz = std::mem::size_of::(), + INFO_TYPE::LINEPROBE => sz = std::mem::size_of::(), + _ => sz = 0, + } + sz + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct filter_meta { + pub pid: c_int, + pub ap: addr_pair, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct filter_params { + pub fm: [filter_meta; 10usize], + pub protocol: u32, + pub cnt: c_int, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct dynamic_offsets { + pub offs: [c_int; 10usize], + pub cnt: c_int, + pub arg: c_int, + pub size: c_int, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct dynamic_fields { + pub ident: *mut c_char, + pub cast_name: *mut c_char, + pub cast_type: c_int, + pub index: c_int, + pub pointer: c_int, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct rtrace { + _unused: [u8; 0], +} + +extern "C" { + pub fn rtrace_dynamic_gen_offset( + r: *mut rtrace, + df: 
*mut dynamic_fields, + df_cnt: c_int, + func_proto_id: c_int, + dos: *mut dynamic_offsets, + ) -> c_int; +} + +extern "C" { + pub fn rtrace_dynamic_gen_insns( + r: *mut rtrace, + dos: *mut dynamic_offsets, + insns: *mut bpf_insn, + cd_off: c_int, + ) -> c_int; +} + +extern "C" { + pub fn rtrace_dynamic_btf(r: *mut rtrace) -> *mut btf; +} + +extern "C" { + pub fn rtrace_alloc_and_init( + pin_path: *mut ::std::os::raw::c_char, + btf_custom_path: *mut ::std::os::raw::c_char, + ) -> *mut rtrace; +} + +extern "C" { + pub fn rtrace_perf_map_fd(r: *mut rtrace) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn rtrace_filter_map_fd(r: *mut rtrace) -> ::std::os::raw::c_int; +} + +extern "C" { + pub fn rtrace_trace_load_prog( + r: *mut rtrace, + prog: *const bpf_program, + insns: *const bpf_insn, + insns_cnt: size_t, + ) -> c_int; +} + +extern "C" { + pub fn rtrace_trace_program( + r: *mut rtrace, + func: *const c_char, + sk: c_int, + skb: c_int, + ) -> *mut bpf_program; +} + +extern "C" { + pub fn rtrace_set_debug(debug: bool); +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/builtin/builtin.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/builtin/builtin.rs new file mode 100644 index 0000000000000000000000000000000000000000..258130326fedeea5093250a21ac557c7e3b895dc --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/builtin/builtin.rs @@ -0,0 +1,56 @@ +use anyhow::Result; + +use crate::bindings::*; +use crate::rtrace::Function; + +/// Builtin paramerters module +/// +/// Use this structure to convert the built-in parameter list +/// to a 64-bit mask. +pub struct Builtin { + mask: u64, +} + +impl Builtin { + /// + pub fn new(function: &Function) -> Result { + let mut mask = 0; + for param in &function.params { + mask |= 1 << (INFO_TYPE::from_string(param)? 
as u64); + } + // LINEPROBE parameter type is implicit + if let Some(_) = &function.lines { + mask |= 1 << (INFO_TYPE::LINEPROBE as u64); + } + + Ok(Builtin { mask: mask }) + } + + /// Get built-in parameter mask. + pub fn get_mask(&self) -> u64 { + self.mask + } + + /// Whether the built-in parameter list contains the KRETPROBE type. + pub fn has_kretprobe(&self) -> bool { + (self.mask & (1 << INFO_TYPE::KRETPROBE as u64)) != 0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_builtin_basic() { + let text = r#" + name = "test" + params = ["basic", "kretprobe"] + "#; + let function = Function::from_str(text).unwrap(); + let b = Builtin::new(&function).unwrap(); + + assert_eq!(b.get_mask(), (1 << 0) | (1 << 3)); + assert_eq!(b.has_kretprobe(), true); + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/builtin/mod.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/builtin/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..afbf8e21684c5a5f76b2d1d10b14258c78bc5378 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/builtin/mod.rs @@ -0,0 +1 @@ +pub mod builtin; \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/dynamic.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/dynamic.rs new file mode 100644 index 0000000000000000000000000000000000000000..04e11c05e9c138743340ff814d3d9b9a81539bd4 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/dynamic.rs @@ -0,0 +1,73 @@ +use crate::bindings::*; +use crate::dynamic::offset::Offset; +use crate::dynamic::parser::Parser; +use crate::rtrace::Function; +use anyhow::anyhow; +use anyhow::Result; +use libbpf_sys::{bpf_insn, btf, btf_type}; +use std::ffi::CString; +use log::*; + +pub struct Dynamic { + func: CString, + exprs: Vec, + + sz: Vec, +} + +impl Dynamic { + pub fn new(function: &Function) -> Result { + let mut exprs = Vec::new(); + if let Some(xs) = 
&function.exprs { + exprs = xs.clone(); + } + + Ok(Dynamic { + func: CString::new(function.name.clone())?, + exprs, + sz: Vec::new(), + }) + } + + pub fn codegen(&mut self, r: *mut rtrace, cd_off: i32) -> Result> { + let mut insns = vec![bpf_insn::default(); 4096usize]; + let btf = unsafe { rtrace_dynamic_btf(r) }; + let func_id = unsafe { + libbpf_sys::btf__find_by_name_kind(btf, self.func.as_ptr(), libbpf_sys::BTF_KIND_FUNC) + }; + if func_id <= 0 { + return Err(anyhow!("unable to find function: {:?} in btf", self.func)); + } + let bt = unsafe { libbpf_sys::btf__type_by_id(btf, func_id as u32) }; + let func_proto_id = unsafe { (*bt).__bindgen_anon_1.type_ }; + let mut p = Parser::new(); + let mut o = Offset::new(r); + let mut insns_cnt = 0; + + for expr in &self.exprs { + debug!("expr: {}", expr); + let fields = p.parse(expr)?; + debug!("fields: {:?}", fields); + let mut offsets = o.parse(func_proto_id, &fields)?; + self.sz.push(offsets.size as u8); + debug!("expr size: {}", offsets.size); + let ret = + unsafe { rtrace_dynamic_gen_insns(r, &mut offsets, &mut insns[insns_cnt], cd_off) }; + if ret <= 0 { + return Err(anyhow!("failed to generate insns")); + } + + insns_cnt += ret as usize; + } + insns.resize(insns_cnt, bpf_insn::default()); + Ok(insns) + } + + pub fn get_sz(&self) -> &Vec{ + &self.sz + } + + pub fn get_exprs(&self) -> &Vec{ + &self.exprs + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/mod.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..52d80c6c5d30d590f736e486ffc0dde008482c4e --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/mod.rs @@ -0,0 +1,4 @@ +pub mod dynamic; + +pub mod parser; +pub mod offset; \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/offset.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/offset.rs new file mode 100644 index 
0000000000000000000000000000000000000000..ff5ce096dee1e9a04d2c9c810390cc181cd4d397 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/offset.rs @@ -0,0 +1,67 @@ +use crate::bindings::*; +use crate::dynamic::parser::{CastType, Field, Parser}; +use anyhow::anyhow; +use anyhow::Result; +use std::ffi::CString; +use std::os::raw::c_char; + +pub struct OffsetInfo { + offs: Vec, + arg: i32, //position + size: i32, +} + +pub struct Offset { + r: *mut rtrace, +} + +impl Offset { + pub fn new(r: *mut rtrace) -> Offset { + Offset { r } + } + + pub fn parse(&mut self, func_proto_id: u32, fields: &Vec) -> Result { + let mut dos = dynamic_offsets::default(); + let mut dfs = Vec::new(); + for field in fields { + let mut cast_name_ptr = std::ptr::null_mut(); + let mut pointer = 0; + let mut index = -1; + let mut cast_type = 0; + if let Some(cast) = &field.cast { + match &cast.ct { + CastType::Struct(name) => { + cast_name_ptr = name.as_ptr() as *mut c_char; + cast_type = libbpf_sys::BTF_KIND_STRUCT; + } + _ => return Err(anyhow!("CastType: {:?} not support", cast)), + } + pointer = cast.pointer; + } + if let Some(x) = field.index { + index = x; + } + dfs.push(dynamic_fields { + ident: field.ident.as_ptr() as *mut c_char, + cast_name: cast_name_ptr, + cast_type: cast_type as i32, + index: index, + pointer: pointer, + }); + } + let ret = unsafe { + rtrace_dynamic_gen_offset( + self.r, + dfs.as_ptr() as *mut dynamic_fields, + dfs.len() as i32, + func_proto_id as i32, + &mut dos, + ) + }; + + if ret != 0 { + return Err(anyhow!("rtrace dynamic gen offsets failed: err {}", ret)); + } + Ok(dos) + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/parser.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/parser.rs new file mode 100644 index 0000000000000000000000000000000000000000..079c82e06915f77ee00abffd1d229b1b75214e1e --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/dynamic/parser.rs @@ -0,0 +1,223 @@ 
+use anyhow::anyhow; +use anyhow::Result; +use std::ops::Range; +use tree_sitter; +use tree_sitter_c; +use std::ffi::CString; + +#[derive(PartialEq, Debug, Clone)] +#[repr(u32)] +#[allow(non_camel_case_types)] +enum Ast { + identifier = 1, + lparen = 5, + rparen = 8, + star = 23, + semi = 39, + lbrack = 61, + rbrack = 62, + struct_ = 78, + dot = 107, + number_literal = 109, + translation_unit = 128, + abstract_pointer_declarator = 178, + struct_specifier = 195, + expression_statement = 208, + cast_expression = 227, + type_descriptor = 228, + subscript_expression = 230, + field_expression = 233, + parenthesized_expression = 235, + field_identifier = 267, + type_identifier = 269, +} + +impl Ast { + pub fn from_u16(val: u16) -> Result { + match val { + 1 => Ok(Ast::identifier), + 5 => Ok(Ast::lparen), + 8 => Ok(Ast::rparen), + 23 => Ok(Ast::star), + 39 => Ok(Ast::semi), + 61 => Ok(Ast::lbrack), + 62 => Ok(Ast::rbrack), + 78 => Ok(Ast::struct_), + 107 => Ok(Ast::dot), + 109 => Ok(Ast::number_literal), + 128 => Ok(Ast::translation_unit), + 178 => Ok(Ast::abstract_pointer_declarator), + 195 => Ok(Ast::struct_specifier), + 208 => Ok(Ast::expression_statement), + 227 => Ok(Ast::cast_expression), + 228 => Ok(Ast::type_descriptor), + 230 => Ok(Ast::subscript_expression), + 233 => Ok(Ast::field_expression), + 235 => Ok(Ast::parenthesized_expression), + 267 => Ok(Ast::field_identifier), + 269 => Ok(Ast::type_identifier), + _ => Err(anyhow!("unable to transmit u16({}) to Ast", val)), + } + } +} + +#[derive(PartialEq, Debug, Clone)] +pub enum CastType { + Struct(CString), + Invalid, +} + +#[derive(Debug, Clone)] +pub struct Cast { + pub ct: CastType, + pub pointer: i32, +} + +#[derive(Debug, Clone)] +pub struct Field { + pub ident: CString, + pub cast: Option, + pub index: Option, +} + +#[derive(Debug, Clone)] +pub struct Parser { + fields: Vec, + // parser context + expr: String, + cast: Cast, +} + +impl Parser { + pub fn new() -> Parser { + Parser { + fields: Vec::new(), + 
expr: String::default(), + cast: Cast { + ct: CastType::Invalid, + pointer: 0, + }, + } + } + + pub fn parse(&mut self, expr: &String) -> Result>{ + self.fields.clear(); + self.expr = expr.clone(); + self.cast.ct = CastType::Invalid; + self.cast.pointer = 0; + + let mut parser = tree_sitter::Parser::new(); + parser + .set_language(tree_sitter_c::language()) + .expect("Error loading C grammar"); + if let Some(parsed) = parser.parse(expr, None) { + let walker = parsed.walk(); + self.visitor(walker.node())?; + } + + Ok(self.fields.clone()) + } + + fn range_to_str(&self, range: Range) -> CString { + CString::new(self.expr[range].to_owned()).expect("CString new failed") + } + + fn visitor(&mut self, node: tree_sitter::Node) -> Result<()> { + for child in 0..node.child_count() { + let cd = node.child(child); + if let Some(x) = cd { + self.visitor(x)?; + } + } + + let ty = Ast::from_u16(node.kind_id())?; + match ty { + Ast::number_literal => { + let mut index: Option = None; + if let Some(_) = self.fields.last_mut() { + index = Some(self.range_to_str(node.byte_range()).into_string().expect("CString new failed").parse()?); + } + if let Some(item) = self.fields.last_mut() { + item.index = index; + } + } + Ast::identifier => self.fields.push(Field { + ident: self.range_to_str(node.byte_range()), + cast: None, + index: None, + }), + Ast::field_identifier => self.fields.push(Field { + ident: self.range_to_str(node.byte_range()), + cast: None, + index: None, + }), + Ast::cast_expression => { + if let Some(item) = self.fields.last_mut() { + item.cast = Some(self.cast.clone()); + } + self.cast.ct = CastType::Invalid; + self.cast.pointer = 0; + } + Ast::struct_ => self.cast.ct = CastType::Struct(CString::default()), + Ast::type_identifier => match &self.cast.ct { + CastType::Struct(_) => { + self.cast.ct = CastType::Struct(self.range_to_str(node.byte_range())) + } + CastType::Invalid => { + return Err(anyhow!( + "unknow cast type for type_identifier: {:?}", + 
self.range_to_str(node.byte_range()) + )) + } + }, + Ast::abstract_pointer_declarator => self.cast.pointer += 1, + _ => {} + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_basic() { + let code = "a.b.c;".to_owned(); + let p = Parser::new(); + println!("{:?}", p); + } + + #[test] + fn test_array_basic1() { + let code = "a.b[2].c;".to_owned(); + let p = Parser::new(); + println!("{:?}", p); + } + #[test] + fn test_array_basic2() { + let code = "a[2].b.c;".to_owned(); + let p = Parser::new(); + println!("{:?}", p); + } + #[test] + fn test_array_basic3() { + let code = "a.b.c[2];".to_owned(); + let p = Parser::new(); + println!("{:?}", p); + } + + #[test] + fn test_cast_basic1() { + let code = "((struct d *)a).b.c;".to_owned(); + let p = Parser::new(); + println!("{:?}", p); + } + + #[test] + fn test_cast_basic2() { + let code = "((struct d *)a.b).c;".to_owned(); + let p = Parser::new(); + println!("{:?}", p); + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/filter/filter.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/filter/filter.rs new file mode 100644 index 0000000000000000000000000000000000000000..2eed7cf452aeec7f149169a3e12039c55db67ebd --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/filter/filter.rs @@ -0,0 +1,66 @@ +use crate::bindings::*; +use crate::rtrace::Filterx; +use anyhow::anyhow; +use anyhow::Result; +use libbpf_sys::*; +use log::*; +use std::os::raw::{c_int, c_void}; + +/// filter module +/// +/// Used to process filter maps in eBPF programs. 
+pub struct Filter { + fd: i32, + key: i32, +} + +impl Filter { + pub fn new(fd: i32) -> Filter { + // value of default key of Filter is 0 + Filter { fd, key: 0 } + } + + fn raw_update(&self, key: *const c_void, value: *const c_void) -> Result<()> { + let ret = unsafe { bpf_map_update_elem(self.fd, key, value, BPF_ANY as u64) }; + if ret < 0 { + return Err(anyhow!("update err, errno: {}", ret)); + } + Ok(()) + } + + pub fn update(&self, filters: &Vec, protocol: Protocol) -> Result<()> { + let zero_fm = filter_meta { + pid: 0, + ap: addr_pair { + saddr: 0, + daddr: 0, + sport: 0, + dport: 0, + }, + }; + let mut tmp_filter_metas = [zero_fm; 10usize]; + debug!("protocol: {:?}", protocol); + for (idx, filter) in filters.iter().enumerate() { + tmp_filter_metas[idx] = filter_meta { + pid: filter.pid as i32, + ap: addr_pair::from_string(&filter.src, &filter.dst)?, + }; + debug!( + "pid: {}, ap: {}", + tmp_filter_metas[idx].pid, + tmp_filter_metas[idx].ap.into_string() + ); + } + + let fp = filter_params { + protocol: protocol as u32, + cnt: filters.len() as i32, + fm: tmp_filter_metas, + }; + + self.raw_update( + &self.key as *const c_int as *const c_void, + &fp as *const filter_params as *const c_void, + ) + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/filter/mod.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/filter/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..3a1545df2d19c013427ea508b7f4dd1e8b1d5a19 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/filter/mod.rs @@ -0,0 +1 @@ +pub mod filter; \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/lib.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ce27af50268c39268b3e8b6e1c59166df7d07cfd --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/lib.rs @@ -0,0 +1,24 @@ +//! # rtrace-rs +//! +//! +//! +//! 
## High level workflow +//! +//! +//! ## Alternate workflow +//! +//! ## Design +//! +//! +//! ## Example +//! +//! + +pub mod bindings; +pub mod builtin; +pub mod dynamic; +pub mod filter; +pub mod rtrace; +pub mod trace; +pub mod utils; + diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/rtrace.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/rtrace.rs new file mode 100644 index 0000000000000000000000000000000000000000..02def72c7f5e2eda8a2ea351db78f735d3fff862 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/rtrace.rs @@ -0,0 +1,422 @@ +use crate::bindings::*; +use crate::builtin::builtin::Builtin; +use crate::dynamic::dynamic::Dynamic; +use crate::filter::filter::Filter; +use crate::trace::prog::Prog; +use crate::trace::trace::Trace; +use crate::utils::gdb::Gdb; +use anyhow::anyhow; +use anyhow::Result; +use serde_derive::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::ffi::CString; +use std::os::raw::{c_char, c_int}; +use std::path::PathBuf; +use toml; + +#[derive(Debug, Deserialize, Serialize)] +pub struct Basic { + pub debug: bool, + pub btf_path: Option, + pub pin_path: Option, + pub vmlinux: Option, + pub ksyms: Option, + pub duration: usize, + pub protocol: String, + pub recv: bool, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct Filterx { + pub pid: usize, + pub dst: String, + pub src: String, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct Function { + pub name: String, + pub enable: Option, + pub sk: Option, + pub skb: Option, + pub params: Vec, + pub exprs: Option>, + pub lines: Option>, + + offsets: Option>, +} +// see: https://github.com/alexcrichton/toml-rs/issues/395 +#[derive(Clone, Debug, Deserialize, Serialize)] +struct FunctionContainer { + function: Vec, +} + +impl Function { + pub fn from_str(s: &str) -> Result { + match toml::from_str(s) { + Ok(x) => Ok(x), + Err(y) => Err(anyhow!("str to Function failed: {}", y)), + } + } +} + +#[derive(Debug, 
Deserialize, Serialize)] +pub struct Config { + pub basic: Basic, + pub filter: Option>, + pub function: Option>, +} + +impl Config { + pub fn from_str(s: &str) -> Result { + match toml::from_str(s) { + Ok(x) => Ok(x), + Err(_) => Err(anyhow!("str to Config failed")), + } + } + + pub fn to_string(&self) -> Result { + match toml::to_string(self) { + Ok(x) => Ok(x), + Err(_) => Err(anyhow!("config to string failed")), + } + } +} + +pub struct Probe { + builtin: Builtin, + trace: Trace, + dynamic: Dynamic, +} + +impl Probe { + pub fn get_exprs(&self) -> &Vec { + self.dynamic.get_exprs() + } + + pub fn get_expr_sz(&self) -> &Vec { + self.dynamic.get_sz() + } +} + +pub struct Rtrace { + ptr: *mut rtrace, + filter: Filter, + probes: HashMap, + progs: HashMap, + config: Option, +} + +impl Rtrace { + pub fn new(btf_path: Option, pin_path: Option) -> Result { + let ptr = Rtrace::init(btf_path, pin_path)?; + let filter_map_fd = unsafe { rtrace_filter_map_fd(ptr) }; + let mut r = Rtrace { + ptr, + filter: Filter::new(filter_map_fd), + probes: HashMap::new(), + progs: HashMap::new(), + config: None, + }; + Ok(r) + } + + fn init(btf_path: Option, pin_path: Option) -> Result<*mut rtrace> { + let mut tmp_btf = CString::default(); + let mut tmp_pin = CString::default(); + let mut tmp_btf_ptr = std::ptr::null_mut(); + let mut tmp_pin_ptr = std::ptr::null_mut(); + if let Some(x) = btf_path { + tmp_btf = CString::new(x.clone())?; + tmp_btf_ptr = tmp_btf.as_ptr() as *mut c_char; + } + + if let Some(x) = pin_path { + tmp_pin = CString::new(x.clone())?; + tmp_pin_ptr = tmp_pin.as_ptr() as *mut c_char; + } + + let ptr = unsafe { rtrace_alloc_and_init(tmp_btf_ptr, tmp_pin_ptr) }; + if ptr == std::ptr::null_mut() { + return Err(anyhow!("unable to open rtrace object")); + } + Ok(ptr) + } + + pub fn insert_prog(&mut self, name: &String, prog: Prog) { + self.progs.insert(name.clone(), prog); + } + + pub fn get_prog(&mut self, name: &String, sk: Option, skb: Option) -> Result { + let mut skv = 
0; + let mut skbv = 0; + if let Some(x) = sk { + skv = x; + } + if let Some(x) = skb { + skbv = x; + } + + if let Some(x) = self.progs.remove(name) { + return Ok(x); + } + + let prog = unsafe { + rtrace_trace_program(self.ptr, CString::new(name.clone())?.as_ptr(), skv, skbv) + }; + + if prog == std::ptr::null_mut() { + return Err(anyhow!( + "failed to find bpf program for function: {}, sk-{}, skb-{}", + name, + skv, + skbv + )); + } + Ok(Prog::new(prog)) + } + + pub fn from_file(path: &PathBuf) -> Result { + let contents = + std::fs::read_to_string(path).expect("Something went wrong reading config file"); + Rtrace::from_str(&contents[..]) + } + + pub fn from_str(s: &str) -> Result { + let config: Config = toml::from_str(s).expect("Config file parsed failed"); + unsafe { + rtrace_set_debug(config.basic.debug); + } + let ptr = Rtrace::init(config.basic.btf_path.clone(), config.basic.pin_path.clone())?; + let filter_map_fd = unsafe { rtrace_filter_map_fd(ptr) }; + let r = Rtrace { + ptr, + filter: Filter::new(filter_map_fd), + probes: HashMap::new(), + progs: HashMap::new(), + config: Some(config), + }; + Ok(r) + } + + fn probe_function(&mut self, function: &Function, offsets: Option>) -> Result { + let builtin = Builtin::new(function)?; + let trace = Trace::new(self.ptr, function)?; + let mut dynamic = Dynamic::new(function)?; + + let mut prog = self.get_prog(&function.name, function.sk, function.skb)?; + let kretprog = self.get_prog(&"kretprobe_common".to_owned(), None, None)?; + let lineprog = self.get_prog(&"kprobe_lines".to_owned(), None, None)?; + prog.patch_builtin_insn(builtin.get_mask())?; + if let Some(_) = function.exprs { + prog.patch_dynamic_insn(&dynamic.codegen(self.ptr, prog.cd_off())?)?; + } + trace.attach_kprobe(&prog)?; + if builtin.has_kretprobe() { + trace.attach_kretprobe(&kretprog)?; + } + + if let Some(offs) = &offsets { + trace.attach_lines(&lineprog, offs)?; + if !builtin.has_kretprobe() { + trace.attach_kretprobe(&kretprog)?; // to clear 
tid_map + } + } + + self.insert_prog(&function.name, prog); + self.insert_prog(&"kretprobe_common".to_owned(), kretprog); + self.insert_prog(&"kprobe_lines".to_owned(), lineprog); + + let p = Probe { + builtin: builtin, + trace: trace, + dynamic: dynamic, + }; + + Ok(p) + } + + fn __probe_functions(&mut self, functions: &Vec) -> Result<()> { + let mut gdb = None; + let mut vmlinux = None; + if let Some(config) = &self.config { + vmlinux = config.basic.vmlinux.clone(); + } + for function in functions { + if let Some(enable) = function.enable { + if enable == false { + continue; + } + } + let mut offsets = None; + if let Some(lines) = &function.lines { + let mut offs = Vec::new(); + for line in lines { + let off = line.parse::(); + match off { + Ok(x) => { + offs.push(x); + continue; + } + _ => {} + } + + match gdb { + None => { + if let Some(x) = &vmlinux { + gdb = Some(Gdb::new(x)?); + } + } + _ => {} + } + if let Some(g) = &mut gdb { + offs.push(g.infoline(line)?); + } + } + offsets = Some(offs); + } + let p = self.probe_function(function, offsets)?; + self.probes.entry(function.name.clone()).or_insert(p); + } + Ok(()) + } + + pub fn probe_functions(&mut self) -> Result<()> { + let mut funtions = None; + if let Some(config) = &self.config { + if let Some(funcs) = &config.function { + funtions = Some(funcs.clone()); + } + } + + if let Some(x) = &funtions { + self.__probe_functions(x)?; + } + Ok(()) + } + + pub fn probe_functions_from_str(&mut self, s: &str) -> Result<()> { + let functions: FunctionContainer = toml::from_str(s).expect("functions str parsed failed"); + self.__probe_functions(&functions.function)?; + Ok(()) + } + + pub fn get_probe(&self, func: &String) -> Option<&Probe> { + if self.probes.contains_key(func) { + return Some(&self.probes[func]); + } + None + } + + pub fn probe_filter(&mut self) -> Result<()> { + if let Some(config) = &self.config { + if let Some(filter) = &config.filter { + self.filter + .update(&filter, 
Protocol::from_string(&config.basic.protocol)?)?; + } + } + Ok(()) + } + + pub fn probe_filter_from_str(&mut self, protocol: Protocol, s: &str) -> Result<()> { + let filters: Vec = toml::from_str(s).expect("filter str parsed failed"); + self.filter.update(&filters, protocol) + } + + pub fn perf_fd(&self) -> i32 { + unsafe { rtrace_perf_map_fd(self.ptr) as i32 } + } + + pub fn protocol(&self) -> Result { + if let Some(config) = &self.config { + return Protocol::from_string(&config.basic.protocol); + } + Err(anyhow!("Please specified config info")) + } + + pub fn is_recv(&self) -> bool { + if let Some(config) = &self.config { + return config.basic.recv; + } + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn test_rtrace_from_str() { + let text = r#" + [basic] + debug = false + btf_path = "/boot/vmlinux-4.19.91-007.ali4000.alios7.x86_64" + duration = 0 + protocol = "tcp" + "#; + let r = Rtrace::from_str(text).unwrap(); + } + + #[test] + fn test_rtrace_probe_functions() { + let text = r#" + [basic] + debug = false + btf_path = "/boot/vmlinux-4.19.91-007.ali4000.alios7.x86_64" + duration = 0 + protocol = "tcp" + + [[filter]] + pid = 0 + dst = "0.0.0.0:0" + src = "0.0.0.0:0" + + [[function]] + name = "__ip_queue_xmit" + enable = true + params = ["basic", "stack", "kretprobe"] + + [[function]] + name = "dev_hard_start_xmit" + enable = true + params = ["basic"] + + [[function]] + name = "__netif_receive_skb_core" + enable = true + params = ["basic"] + + [[function]] + name = "tcp_rcv_state_process" + enable = true + params = ["basic"] + "#; + let mut r = Rtrace::from_str(text).unwrap(); + r.probe_functions().unwrap(); + } + + #[test] + fn test_probe_functions_from_str_basic1() { + let text = r#" + [[function]] + name = "tcp_rcv_state_process" + enable = true + params = ["basic"] + "#; + let mut r = Rtrace::new(None, None).unwrap(); + r.probe_functions_from_str(text).unwrap(); + } + + #[test] + fn test_probe_functions_from_str_basic2() { + let text = 
r#" + [[function]] + name = "tcp_rcv_state_process" + enable = true + params = ["basic"] + expr = ["skb.data"] + "#; + let mut r = Rtrace::new(None, None).unwrap(); + r.probe_functions_from_str(text).unwrap(); + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/trace/mod.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/trace/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..f0921f85fa73eb302152a0bd305d5b686d9c4bb3 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/trace/mod.rs @@ -0,0 +1,2 @@ +pub mod trace; +pub mod prog; \ No newline at end of file diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/trace/prog.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/trace/prog.rs new file mode 100755 index 0000000000000000000000000000000000000000..29d5a05c6eca41223c82641c67321e57eee60d42 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/trace/prog.rs @@ -0,0 +1,236 @@ +use crate::bindings::*; +use anyhow::anyhow; +use anyhow::Result; +use libbpf_sys::{ + bpf_insn, bpf_link, bpf_program, BPF_ALU64, BPF_CALL, BPF_DW, BPF_EXIT, BPF_IMM, BPF_JMP, + BPF_LD, BPF_MOV, BPF_REG_1, BPF_REG_10, BPF_X, +}; + +pub const INSNS_SPLIT_POS: usize = 18; + +#[derive(Clone, Debug)] +pub struct Prog { + ptr: *const bpf_program, + + builtin_insn_pos: i32, + cd_off: i32, + origin_insns: Vec, + + insns: Vec, + bl: *mut bpf_link, +} + +impl Prog { + pub fn new(ptr: *const bpf_program) -> Prog { + let mut p = Prog { + ptr, + builtin_insn_pos: -1, + cd_off: i32::MAX, + origin_insns: Vec::new(), + insns: Vec::with_capacity(4096), + bl: std::ptr::null_mut(), + }; + p.clone_insns(); + p.builtin_insn_pos = p.find_builtin_insn_pos(); + p.cd_off = p.find_cd_off(); + p + } + + /// delete patched instructions, and reset original status. 
+ pub fn reset(&mut self) { + self.insns.clear(); + } + + fn sys_prog_insns(&self) -> *const bpf_insn { + unsafe { libbpf_sys::bpf_program__insns(self.ptr) } + } + + fn sys_prog_insns_cnt(&self) -> usize { + unsafe { libbpf_sys::bpf_program__insn_cnt(self.ptr) as usize } + } + + pub fn is_double_insn(&self, insn: bpf_insn) -> bool { + insn.code as u32 == (BPF_LD | BPF_IMM | BPF_DW) + } + + fn clone_insns(&mut self) { + let cnt = self.sys_prog_insns_cnt(); + let insns = self.sys_prog_insns(); + + for i in 0..cnt { + unsafe { self.origin_insns.push(*insns.offset(i as isize)) }; + } + } + + fn find_cd_off(&mut self) -> i32 { + // * 1139: (bf) r1 = r10 + // * 1140: (07) r1 += -280 + // * 1141: (bf) r1 = r1 + // * 1142: (b7) r0 = 0 + // * 1143: (95) exit + let tmp_insn = self.origin_insns[self.origin_insns.len() - INSNS_SPLIT_POS]; + if tmp_insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) as u8 + && tmp_insn.dst_reg() == BPF_REG_1 as u8 + && tmp_insn.src_reg() == BPF_REG_10 as u8 + { + return self.origin_insns[self.origin_insns.len() - INSNS_SPLIT_POS + 1].imm; + } + i32::MAX + } + + fn find_builtin_insn_pos(&mut self) -> i32 { + let mut double_insn = false; + for i in 0..self.origin_insns.len() { + if double_insn { + double_insn = false; + continue; + } + // 0: (79) r9 = *(u64 *)(r1 +104) + // 1: (7b) *(u64 *)(r10 -296) = r1 + // 2: (79) r8 = *(u64 *)(r1 +112) + // 3: (7b) *(u64 *)(r10 -32) = r8 + // 4: (b7) r7 = 2184 + // 5: (b7) r6 = 0 + double_insn = self.is_double_insn(self.origin_insns[i]); + // notice: not double insns. 
+ if !double_insn { + let imm = self.origin_insns[i].imm as u64; + if imm == 0x888 { + return i as i32; + } + } + } + -1 + } + + pub fn patch_builtin_insn(&mut self, mask: u64) -> Result<()> { + if self.builtin_insn_pos < 0 { + return Err(anyhow!("unable to find target builtin insn")); + } + self.origin_insns[self.builtin_insn_pos as usize].imm = mask as i32; + Ok(()) + } + + /// Merge the instructions of the ebpf program with the newly + /// generated instructions. + /// + /// 1. copy insns. + /// 1. Calculate the instruction split point based on the previous buried + /// point position. + /// 1. fixup jmp. + /// 1. Merge instructions. + pub fn patch_dynamic_insn(&mut self, insns: &Vec) -> Result<()> { + let mark_off = self.origin_insns.len() - INSNS_SPLIT_POS; + // copy insns + for i in 0..mark_off { + self.insns.push(self.origin_insns[i]); + } + // fixup jmp + for i in 0..self.insns.len() { + let class = self.insns[i].code & 0x07; + let opcode; + + if class != BPF_JMP as u8 { + continue; + } + + opcode = self.insns[i].code & 0xf0; + if opcode == BPF_CALL as u8 || opcode == BPF_EXIT as u8 { + continue; + } + + if self.insns[i].off as usize + i + 1 >= mark_off + 3 { + self.insns[i].off += insns.len() as i16; + } + } + // merge insns + for i in 0..insns.len() { + self.insns.push(insns[i]); + } + // copy left insns + for i in mark_off..self.origin_insns.len() { + self.insns.push(self.origin_insns[i]); + } + // fix err code + for i in 0..self.insns.len() { + let class = self.insns[i].code & 0x07; + let opcode; + + if class != BPF_JMP as u8 { + continue; + } + + opcode = self.insns[i].code & 0xf0; + if opcode == BPF_CALL as u8 || opcode == BPF_EXIT as u8 { + continue; + } + + if self.insns[i].off == 4096 { + self.insns[i].off = (self.insns.len() - 5 - i - 1) as i16; + } + } + Ok(()) + } + + pub fn insns(&self) -> *const bpf_insn { + if self.insns.len() == 0 { + return self.origin_insns.as_ptr() as *const bpf_insn; + } + self.insns.as_ptr() as *const bpf_insn + } + + 
pub fn insns_cnt(&self) -> usize { + if self.insns.len() == 0 { + return self.origin_insns.len(); + } + self.insns.len() + } + + /// return struct cache_data offset in eBPF program stack + pub fn cd_off(&self) -> i32 { + self.cd_off + } + + /// return builtin mask instruction position + pub fn builtin_insn_pos(&self) -> i32 { + self.builtin_insn_pos + } + + /// c raw pointer + pub fn raw_ptr(&self) -> *const bpf_program { + self.ptr + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::ffi::CString; + #[test] + fn test_prog_find_builtin_insn_pos() { + let ptr = unsafe { rtrace_alloc_and_init(std::ptr::null_mut(), std::ptr::null_mut()) }; + let name = CString::new("ip_queue_xmit").unwrap(); + let prog_ptr = unsafe { rtrace_trace_program(ptr, name.as_ptr(), 0, 0) }; + let prog = Prog::new(prog_ptr); + assert_eq!(prog.builtin_insn_pos() > 0, true); + + let name = CString::new("kretprobe_common").unwrap(); + let prog_ptr = unsafe { rtrace_trace_program(ptr, name.as_ptr(), 0, 0) }; + let prog = Prog::new(prog_ptr); + assert_eq!(prog.builtin_insn_pos() > 0, false); + } + + #[test] + fn test_prog_find_cd_off() { + let ptr = unsafe { rtrace_alloc_and_init(std::ptr::null_mut(), std::ptr::null_mut()) }; + let name = CString::new("ip_queue_xmit").unwrap(); + let prog_ptr = unsafe { rtrace_trace_program(ptr, name.as_ptr(), 0, 0) }; + let prog = Prog::new(prog_ptr); + assert_ne!(prog.cd_off(), i32::MAX); + + let name = CString::new("kretprobe_common").unwrap(); + let prog_ptr = unsafe { rtrace_trace_program(ptr, name.as_ptr(), 0, 0) }; + let prog = Prog::new(prog_ptr); + assert_eq!(prog.cd_off(), i32::MAX); + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/trace/trace.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/trace/trace.rs new file mode 100644 index 0000000000000000000000000000000000000000..464c06fc3963435e2e26fcd0efe01ebbfd1b4fef --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/trace/trace.rs @@ -0,0 +1,118 @@ 
+use crate::bindings::*; +use crate::rtrace::Function; +use crate::trace::prog::Prog; +use anyhow::anyhow; +use anyhow::Result; +use libbpf_sys::{bpf_kprobe_opts, size_t}; +use std::ffi::{CStr, CString}; +use std::os::raw::{c_char, c_int, c_void}; +use log::*; + +/// dynamic trace module +/// +/// +pub struct Trace { + r: *mut rtrace, + + func: CString, + sk: c_int, + skb: c_int, +} + +impl Trace { + pub fn new(r: *mut rtrace, function: &Function) -> Result { + let func = CString::new(function.name.clone())?; + let mut sk = 0; + let mut skb = 0; + if let Some(x) = function.sk { + sk = x; + } + if let Some(x) = function.skb { + skb = x; + } + Ok(Trace { + r, + func, + sk, + skb, + }) + } + + /// load and attach kprobe type eBPF program for this func. + pub fn attach_kprobe(&self, prog: &Prog) -> Result<()> { + let err = unsafe { + rtrace_trace_load_prog(self.r, prog.raw_ptr(), prog.insns(), prog.insns_cnt() as size_t) + }; + + if err < 0 { + return Err(anyhow!("unable to load kprobe -> {:?}, err: {}", self.func, err)); + } + + let bl = unsafe { + libbpf_sys::bpf_program__attach_kprobe( + prog.raw_ptr(), + false, + self.func.as_ptr() as *const c_char, + ) + }; + let err = unsafe { libbpf_sys::libbpf_get_error(bl as *const c_void) }; + if err < 0 { + return Err(anyhow!("failed to attach kprobe -> {:?}", self.func)); + } + + debug!("attach kprobe ({:?}) successfully.", self.func); + Ok(()) + } + + pub fn attach_kretprobe(&self, prog: &Prog) -> Result<()> { + let bl = unsafe { + libbpf_sys::bpf_program__attach_kprobe( + prog.raw_ptr(), + true, + self.func.as_ptr() as *const c_char, + ) + }; + let err = unsafe { libbpf_sys::libbpf_get_error(bl as *const c_void) }; + if err < 0 { + return Err(anyhow!("failed to attach kretprobe -> {:?}", self.func,)); + } + debug!("attach kretprobe ({:?}) successfully.", self.func); + Ok(()) + } + + fn attach_line(&self, prog: &Prog, offset: u64) -> Result<()> { + let mut opts = bpf_kprobe_opts::default(); + opts.sz = 
std::mem::size_of::() as u64; + opts.bpf_cookie = 0; + opts.offset = offset; + opts.retprobe = false; + + unsafe { + let bl = libbpf_sys::bpf_program__attach_kprobe_opts( + prog.raw_ptr(), + self.func.as_ptr() as *mut c_char, + &opts as *const libbpf_sys::bpf_kprobe_opts, + ); + + let err = libbpf_sys::libbpf_get_error(bl as *const c_void); + if err < 0 { + return Err(anyhow!( + "failed to attach kprobe+{} -> {:?} ", + opts.offset, + self.func + )); + } + } + debug!("attach kprobe ({:?}+{}) successfully.", self.func, offset); + Ok(()) + } + + pub fn attach_lines(&self, prog: &Prog, offsets: &Vec) -> Result<()> { + for offset in offsets { + self.attach_line(prog, *offset)?; + } + Ok(()) + } +} + + diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/utils/gdb.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/utils/gdb.rs new file mode 100644 index 0000000000000000000000000000000000000000..ea5fdb3e1fa50317012d800dcff0d4897ec0b6f0 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/utils/gdb.rs @@ -0,0 +1,87 @@ +use anyhow::Result; +use std::io; +use anyhow::anyhow; +use std::io::{BufRead, BufReader, BufWriter, Write}; +use std::process; +use regex::Regex; + +pub struct Gdb { + stdin: BufWriter, + stdout: BufReader, +} + +impl Gdb { + pub fn new(vmlinux: &String) -> Result { + let mut child = process::Command::new("gdb") + .args(&["--interpreter=mi"]) + .stdout(process::Stdio::piped()) + .stdin(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .spawn()?; + let mut gdb = Gdb { + stdin: BufWriter::new(child.stdin.take().expect("broken stdin")), + stdout: BufReader::new(child.stdout.take().expect("broken stdout")), + }; + gdb.read_response()?; + let output = gdb.send_cmd_raw(&format!("file {}\n", vmlinux)[..]); + // println!("{:?}", output); + Ok(gdb) + } + + fn read_sequence(&mut self) -> Result> { + let mut result = Vec::new(); + let mut line = String::new(); + self.stdout.read_line(&mut line)?; + while line != "(gdb) \n" { 
+ result.push(line.clone()); + line.clear(); + self.stdout.read_line(&mut line)?; + } + Ok(result) + } + + fn read_response(&mut self) -> Result> { + loop { + let sequence = self.read_sequence(); + if let Some(resp) = sequence.into_iter().nth(0) { + return Ok(resp); + } + } + } + + fn send_cmd_raw(&mut self, cmd: &str) -> Result> { + self.stdin.write_all(cmd.as_ref())?; + self.stdin.flush()?; + self.read_response() + } + + pub fn infoline(&mut self, line: &String) -> Result { + let string = format!("info line {}\n", line); + let output = self.send_cmd_raw(&string)?; + let regex = Regex::new(r"\+(\d+)")?; + for cap in regex.captures_iter(&output[1]) { + return Ok(*(&cap[1].parse::()?)); + } + Err(anyhow!("unable to get offset")) + } +} + +impl Drop for Gdb { + fn drop(&mut self) { + let _ = self.stdin.write_all(b"-gdb-exit\n"); + } +} + +#[cfg(test)] +mod tests { + + use super::*; + #[test] + fn test_gdb() { + let mut g = Gdb::new( + &"/work/vmlinux-btf/vmlinux/vmlinux-4.19.91-007.ali4000.alios7.x86_64".to_owned(), + ) + .unwrap(); + g.infoline(&"net/ipv4/tcp.c:400".to_owned()).unwrap(); + } +} diff --git a/source/tools/detect/net_diag/rtrace/rtrace-rs/src/utils/mod.rs b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/utils/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..5fb1942ef66c2cc99b50bbc8e6904adf87b84722 --- /dev/null +++ b/source/tools/detect/net_diag/rtrace/rtrace-rs/src/utils/mod.rs @@ -0,0 +1 @@ +pub mod gdb; \ No newline at end of file