From f944eec4fcb4cfda789592c5968d3b61024e4582 Mon Sep 17 00:00:00 2001 From: up200504098 Date: Thu, 23 May 2024 11:04:51 +0000 Subject: [PATCH] =?UTF-8?q?!10=20TCP=E6=96=AD=E8=BF=9Ereset&destory=20patc?= =?UTF-8?q?h=20*=20TCP=E6=96=AD=E8=BF=9Ereset&destory=20patch?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- include/linux/bpf.h | 19 ++++++++++++ include/uapi/linux/bpf.h | 22 ++++++++++++++ net/Kconfig | 7 +++++ net/core/filter.c | 55 ++++++++++++++++++++++++++++++++++ net/ipv4/tcp.c | 11 +++++-- tools/include/uapi/linux/bpf.h | 22 ++++++++++++++ 6 files changed, 134 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a2bc8f19c855..4ca2eb4d0dbf 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1571,6 +1571,18 @@ static inline bool unprivileged_ebpf_enabled(void) return !sysctl_unprivileged_bpf_disabled; } +#ifdef CONFIG_TCP_SOCK_DESTROY +/* Not all bpf prog type has the bpf_ctx. + * For the bpf prog type that has initialized the bpf_ctx, + * this function can be used to decide if a kernel function + * is called by a bpf program. + */ +static inline bool has_current_bpf_ctx(void) +{ + return !!current->bpf_ctx; +} +#endif /* CONFIG_TCP_SOCK_DESTROY */ + #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -1771,6 +1783,13 @@ static inline bool unprivileged_ebpf_enabled(void) return false; } +#ifdef CONFIG_TCP_SOCK_DESTROY +static inline bool has_current_bpf_ctx(void) +{ + return false; +} +#endif /* CONFIG_TCP_SOCK_DESTROY */ + #endif /* CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0f39fdcb2273..2cf6369cfaf4 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3742,6 +3742,26 @@ union bpf_attr { * Return * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. 
+ * + * int bpf_sock_tcp_send_reset(struct sk_buff *skb) + * Description + * If the Netfirewall intercepts a TCP socket connection, + * we need to actively send a reset packet to disconnect the current TCP connection. + * Return + * The helper returns 0 on success after sending the reset packet. + * + * int bpf_sock_destroy(struct sk_buff *skb) + * Description + * Destroy the given socket with ECONNABORTED error code. + * The function expects a non-NULL pointer to a socket, and invokes the + * protocol specific socket destroy handlers. + * + * The helper can only be called from BPF contexts that have acquired the socket + * locks. + * Return + * On error, returns a negative error code. + * -EOPNOTSUPP if the protocol specific destroy handler is not supported. + * 0 otherwise */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3900,6 +3920,8 @@ union bpf_attr { FN(per_cpu_ptr), \ FN(this_cpu_ptr), \ FN(redirect_peer), \ + FN(sock_tcp_send_reset), \ + FN(bpf_sock_destroy), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/net/Kconfig b/net/Kconfig index 6f863a06fbc5..af7a95cec10e 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -473,6 +473,13 @@ config LOWPOWER_PROTOCOL This option enables the lowpower protocol feature. When enabled, the kernel will include support for CONFIG_LOWPOWER_PROTOCOL. +config TCP_SOCK_DESTROY + bool "TCP socket destroy feature" + default n + help + This option enables the tcp sock destroy feature. + When enabled, the kernel will include support for CONFIG_TCP_SOCK_DESTROY. + endif # if NET # Used by archs to tell that they support BPF JIT compiler plus which flavour. 
diff --git a/net/core/filter.c b/net/core/filter.c index 9343a911e9e2..29342e6f54c5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4616,6 +4616,57 @@ static const struct bpf_func_proto bpf_get_socket_cookie_proto = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_sock_tcp_send_reset, struct sk_buff *, skb) +{ +#ifdef CONFIG_TCP_SOCK_DESTROY +#if IS_ENABLED(CONFIG_NF_REJECT_IPV4) + //destroy tcp + struct sock *sk = sk_to_full_sk(skb->sk); + if (sk->sk_protocol == IPPROTO_TCP) { + struct net *net = dev_net(skb->dev); + int hook = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD) | (1 << NF_INET_LOCAL_OUT); + if (sk->sk_family == AF_INET) { + nf_send_reset(net, skb, hook); +#if IS_ENABLED(CONFIG_NF_REJECT_IPV6) + } else if (sk->sk_family == AF_INET6) { + nf_send_reset6(net, skb, hook); +#endif + } + } +#endif + return 0; +#else + return -1; +#endif +} + +static const struct bpf_func_proto bpf_sock_tcp_send_reset_proto = { + .func = bpf_sock_tcp_send_reset, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + +BPF_CALL_1(bpf_sock_destroy, struct sk_buff *, skb) +{ +#ifdef CONFIG_TCP_SOCK_DESTROY + struct sock *sk = sk_to_full_sk(skb->sk); + if (!sk->sk_prot->diag_destroy || (sk->sk_protocol != IPPROTO_TCP)) { + return -EOPNOTSUPP; + } + return sk->sk_prot->diag_destroy(sk, ECONNABORTED); +#else + return -1; +#endif +} + +static const struct bpf_func_proto bpf_sock_destroy_proto = { + .func = bpf_sock_destroy, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) { return __sock_gen_cookie(ctx->sk); @@ -7094,6 +7145,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_local_storage_proto; case BPF_FUNC_sk_fullsock: return &bpf_sk_fullsock_proto; + case BPF_FUNC_sock_tcp_send_reset: + return &bpf_sock_tcp_send_reset_proto; case BPF_FUNC_sk_storage_get: return 
&bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: @@ -7126,6 +7179,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_skb_ecn_set_ce: return &bpf_skb_ecn_set_ce_proto; #endif + case BPF_FUNC_bpf_sock_destroy: + return &bpf_sock_destroy_proto; default: return sk_filter_func_proto(func_id, prog); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f946fa611c38..1e8317aa5e7c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4306,7 +4306,11 @@ int tcp_abort(struct sock *sk, int err) } /* Don't race with userspace socket closes such as tcp_close. */ - lock_sock(sk); +#ifdef CONFIG_TCP_SOCK_DESTROY + /* BPF context ensures sock locking. */ + if (!has_current_bpf_ctx()) +#endif /* CONFIG_TCP_SOCK_DESTROY */ + lock_sock(sk); if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); @@ -4330,7 +4334,10 @@ int tcp_abort(struct sock *sk, int err) bh_unlock_sock(sk); local_bh_enable(); tcp_write_queue_purge(sk); - release_sock(sk); +#ifdef CONFIG_TCP_SOCK_DESTROY + if (!has_current_bpf_ctx()) +#endif /* CONFIG_TCP_SOCK_DESTROY */ + release_sock(sk); return 0; } EXPORT_SYMBOL_GPL(tcp_abort); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 1dd4b1acbcb0..a1ab5df3f00d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3742,6 +3742,26 @@ union bpf_attr { * Return * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. + * + * int bpf_sock_tcp_send_reset(struct sk_buff *skb) + * Description + * If the Netfirewall intercepts a TCP socket connection, + * we need to actively send a reset packet to disconnect the current TCP connection. + * Return + * The helper returns 0 on success after sending the reset packet. + * + * int bpf_sock_destroy(struct sk_buff *skb) + * Description + * Destroy the given socket with ECONNABORTED error code. 
+ * The function expects a non-NULL pointer to a socket, and invokes the + * protocol specific socket destroy handlers. + * + * The helper can only be called from BPF contexts that have acquired the socket + * locks. + * Return + * On error, returns a negative error code. + * -EOPNOTSUPP if the protocol specific destroy handler is not supported. + * 0 otherwise */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3900,6 +3920,8 @@ union bpf_attr { FN(per_cpu_ptr), \ FN(this_cpu_ptr), \ FN(redirect_peer), \ + FN(sock_tcp_send_reset), \ + FN(bpf_sock_destroy), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- Gitee