diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a2bc8f19c8552c19edba1dab3e505a513525720a..4ca2eb4d0dbf7496b6ce1aa20f8b7089b7488406 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1571,6 +1571,18 @@ static inline bool unprivileged_ebpf_enabled(void) return !sysctl_unprivileged_bpf_disabled; } +#ifdef CONFIG_TCP_SOCK_DESTROY +/* Not all bpf prog type has the bpf_ctx. + * For the bpf prog type that has initialized the bpf_ctx, + * this function can be used to decide if a kernel function + * is called by a bpf program. + */ +static inline bool has_current_bpf_ctx(void) +{ + return !!current->bpf_ctx; +} +#endif /* CONFIG_TCP_SOCK_DESTROY */ + #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -1771,6 +1783,13 @@ static inline bool unprivileged_ebpf_enabled(void) return false; } +#ifdef CONFIG_TCP_SOCK_DESTROY +static inline bool has_current_bpf_ctx(void) +{ + return false; +} +#endif /* CONFIG_TCP_SOCK_DESTROY */ + #endif /* CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0f39fdcb2273c4ca42c7a00a88ec2a34e3faa886..a466deadc36e92da4d40e7870119e402f24d4ef2 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3742,6 +3742,26 @@ union bpf_attr { * Return * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. + * + * int bpf_sock_tcp_send_reset(struct sk_buff *skb) + * Description + * Redirect If Netfirewall intercepts socket TCP interception, + * we need to actively send a reset packet to disconnect the current TCP connection. + * Return + * The helper returns Send packet reset sucess. + * + * int bpf_sock_destroy(struct sk_buff *skb) + * Description + * Destroy the given socket with ECONNABORTED error code. + * The function expects a non-NULL pointer to a socket, and invokes the + * protocol specific socket destroy handlers. 
+ * + * The helper can only be called from BPF contexts that have acquired the socket + * locks. + * Return + * On error, may return EOPNOTSUPP, EINVAL. + * EOPNOTSUPP if the protocol specific destroy handler is not supported. + * 0 otherwise */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3900,6 +3920,8 @@ union bpf_attr { FN(per_cpu_ptr), \ FN(this_cpu_ptr), \ FN(redirect_peer), \ + FN(sock_tcp_send_reset), \ + FN(bpf_sock_destroy), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/net/Kconfig b/net/Kconfig index 6f863a06fbc5d1af7e1e7122bd267cd86ef74bc6..af7a95cec10eec191d041d918b07f2f3a7159a0c 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -473,6 +473,13 @@ config LOWPOWER_PROTOCOL This option enables the lowpower protocol feature. When enabled, the kernel will include support for CONFIG_LOWPOWER_PROTOCOL. +config TCP_SOCK_DESTROY + bool "TCP socket destroy feature" + default n + help + This option enables the tcp sock destroy feature. + When enabled, the kernel will include support for CONFIG_TCP_SOCK_DESTROY. + endif # if NET # Used by archs to tell that they support BPF JIT compiler plus which flavour. 
diff --git a/net/core/filter.c b/net/core/filter.c index 9343a911e9e247564c93e4314b7a55f4f47db8bd..ca79f29284860bbfee566f0d2a0d7b1ff3ac43d5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -78,6 +78,12 @@ #include #include +#ifdef CONFIG_TCP_SOCK_DESTROY +#include +#include +#include +#endif + static const struct bpf_func_proto * bpf_sk_base_func_proto(enum bpf_func_id func_id); @@ -4616,6 +4622,57 @@ static const struct bpf_func_proto bpf_get_socket_cookie_proto = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_sock_tcp_send_reset, struct sk_buff *, skb) +{ +#ifdef CONFIG_TCP_SOCK_DESTROY +#ifdef CONFIG_NF_REJECT_IPV4 + //destroy tcp + struct sock *sk = sk_to_full_sk(skb->sk); + if (sk->sk_protocol == IPPROTO_TCP) { + struct net *net = dev_net(skb->dev); + int hook = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD) | (1 << NF_INET_LOCAL_OUT); + if (sk->sk_family == AF_INET) { + nf_send_reset(net, skb, hook); +#ifdef CONFIG_NF_REJECT_IPV6 + } else if (sk->sk_family == AF_INET6) { + nf_send_reset6(net, skb, hook); +#endif + } + } +#endif + return 0; +#else + return -1; +#endif +} + +static const struct bpf_func_proto bpf_sock_tcp_send_reset_proto = { + .func = bpf_sock_tcp_send_reset, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + +BPF_CALL_1(bpf_sock_destroy, struct sk_buff *, skb) +{ +#ifdef CONFIG_TCP_SOCK_DESTROY + struct sock *sk = sk_to_full_sk(skb->sk); + if (!sk->sk_prot->diag_destroy || (sk->sk_protocol != IPPROTO_TCP)) { + return -EOPNOTSUPP; + } + return sk->sk_prot->diag_destroy(sk, ECONNABORTED); +#else + return -1; +#endif +} + +static const struct bpf_func_proto bpf_sock_destroy_proto = { + .func = bpf_sock_destroy, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) { return __sock_gen_cookie(ctx->sk); @@ -7094,6 +7151,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog 
*prog) return &bpf_get_local_storage_proto; case BPF_FUNC_sk_fullsock: return &bpf_sk_fullsock_proto; + case BPF_FUNC_sock_tcp_send_reset: + return &bpf_sock_tcp_send_reset_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: @@ -7126,6 +7185,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_skb_ecn_set_ce: return &bpf_skb_ecn_set_ce_proto; #endif + case BPF_FUNC_bpf_sock_destroy: + return &bpf_sock_destroy_proto; default: return sk_filter_func_proto(func_id, prog); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f946fa611c384a160d5058a017a975591e5c3ca8..1e8317aa5e7c9e17266a60255f645550c767409d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4306,7 +4306,11 @@ int tcp_abort(struct sock *sk, int err) } /* Don't race with userspace socket closes such as tcp_close. */ - lock_sock(sk); +#ifdef CONFIG_TCP_SOCK_DESTROY + /* BPF context ensures sock locking. */ + if (!has_current_bpf_ctx()) +#endif /* CONFIG_TCP_SOCK_DESTROY */ + lock_sock(sk); if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); @@ -4330,7 +4334,10 @@ int tcp_abort(struct sock *sk, int err) bh_unlock_sock(sk); local_bh_enable(); tcp_write_queue_purge(sk); - release_sock(sk); +#ifdef CONFIG_TCP_SOCK_DESTROY + if (!has_current_bpf_ctx()) +#endif /* CONFIG_TCP_SOCK_DESTROY */ + release_sock(sk); return 0; } EXPORT_SYMBOL_GPL(tcp_abort); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 1dd4b1acbcb07f815f9e3ca1f344c4df35b3077c..5a71453fdc297c6a7733a4e2081288423a511855 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3742,6 +3742,26 @@ union bpf_attr { * Return * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. 
+ * + * int bpf_sock_tcp_send_reset(struct sk_buff *skb) + * Description + * If the netfilter firewall intercepts a TCP connection on this socket, + * actively send a reset packet to disconnect the current TCP connection. + * Return + * The helper returns 0 on success. + * + * int bpf_sock_destroy(struct sk_buff *skb) + * Description + * Destroy the given socket with ECONNABORTED error code. + * The function expects a non-NULL pointer to a socket, and invokes the + * protocol specific socket destroy handlers. + * + * The helper can only be called from BPF contexts that have acquired the socket + * locks. + * Return + * On error, may return EOPNOTSUPP, EINVAL. + * EOPNOTSUPP if the protocol specific destroy handler is not supported. + * 0 otherwise */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3900,6 +3920,8 @@ union bpf_attr { FN(per_cpu_ptr), \ FN(this_cpu_ptr), \ FN(redirect_peer), \ + FN(sock_tcp_send_reset), \ + FN(bpf_sock_destroy), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper