diff --git a/include/linux/tcp.h b/include/linux/tcp.h index ee7424595bbfcefe743257f2dac2094619869067..c4b5ba51ee838dbe87dd28028aa5a4d5937768f5 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -365,9 +365,9 @@ struct tcp_sock { #else #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0 #endif -#ifdef CONFIG_TCP_NB_URC - u16 tcp_retries2; -#endif /* CONFIG_TCP_NB_URC */ +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) + u16 nata_data_retries; +#endif u16 timeout_rehash; /* Timeout-triggered rehash attempts */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 87225d6a85d9c1ab159d0876589ff71aa2c83c57..c1ac02f9a063f9803f83c7d29a67c012f03c5606 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -53,6 +53,14 @@ struct inet_connection_sock_af_ops { void (*mtu_reduced)(struct sock *sk); }; +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) +enum nata_retries_type_t { + NATA_NA = 0, + NATA_URC = 1, + NATA_STL = 2, +}; +#endif + /** inet_connection_sock - INET connection oriented sock * * @icsk_accept_queue: FIFO of established children @@ -108,11 +116,13 @@ struct inet_connection_sock { __u8 icsk_syn_retries; __u8 icsk_probes_out; __u16 icsk_ext_hdr_len; -#ifdef CONFIG_TCP_NB_URC - __u8 icsk_nb_urc_enabled:1, - icsk_nb_urc_reserved:7; - __u32 icsk_nb_urc_rto; -#endif /* CONFIG_TCP_NB_URC */ +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) + __u8 nata_retries_enabled:1, + nata_reserved:7; + __u8 nata_retries_type; + __u32 nata_syn_rto; + __u32 nata_data_rto; +#endif struct { __u8 pending; /* ACK is pending */ __u8 quick; /* Scheduled number of quick acks */ @@ -217,6 +227,24 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) } } +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) +static inline unsigned long get_nata_rto(struct sock *sk, + struct inet_connection_sock *icsk, + unsigned long when) +{ + 
if (!icsk->nata_retries_enabled) + return when; + + if (icsk->nata_retries_type == NATA_STL) + return sk->sk_state == TCP_SYN_SENT ? + icsk->nata_syn_rto : icsk->nata_data_rto; + if (icsk->nata_retries_type == NATA_URC) + return when >= icsk->nata_data_rto ? icsk->nata_data_rto : when; + + return when; +} +#endif + /* * Reset the retransmission timer */ @@ -226,10 +254,9 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, { struct inet_connection_sock *icsk = inet_csk(sk); -#ifdef CONFIG_TCP_NB_URC - if (icsk->icsk_nb_urc_enabled) - when = icsk->icsk_nb_urc_rto; -#endif /* CONFIG_TCP_NB_URC */ +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) + when = get_nata_rto(sk, icsk, when); +#endif if (when > max_when) { pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", diff --git a/include/net/tcp.h b/include/net/tcp.h index 68283d6425e8144e3a3914b01b63a4af6ff5bc3e..e9c9262ba34b58361d4d5c8406ef2e6b3feadfeb 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2415,16 +2415,16 @@ static inline u64 tcp_transmit_time(const struct sock *sk) return 0; } -#ifdef CONFIG_TCP_NB_URC +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) static inline int tcp_get_retries_limit(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - if (inet_csk(sk)->icsk_nb_urc_enabled) - return tp->tcp_retries2; + if (inet_csk(sk)->nata_retries_enabled) + return tp->nata_data_retries; return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); } -#endif /* CONFIG_TCP_NB_URC */ +#endif #endif /* _TCP_H */ diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 5b5d2ac8242a7fb6688e01f481d5a2a2b5a14066..08f8589608f84066915a5c71d8c6fb675e24b7d5 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -128,17 +128,26 @@ enum { #define TCP_CM_INQ TCP_INQ #define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */ -#define TCP_NB_URC 101 /* netibooster ultar-reliable connection */ #define TCP_REPAIR_ON 
1 #define TCP_REPAIR_OFF 0 #define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ -struct tcp_nb_urc { - __u8 nb_urc_enabled; - __u8 tcp_syn_retries; - __u16 tcp_retries2; - __u32 nb_urc_rto_ms; +#define TCP_NATA_URC 101 /* Nata ultra-reliable connection */ +struct tcp_nata_urc { + __u8 nata_urc_enabled; + __u8 nata_syn_retries; + __u16 nata_data_retries; + __u32 nata_rto_ms; +}; + +#define TCP_NATA_STL 102 /* Nata satellite connection */ +struct tcp_nata_stl { + __u8 nata_stl_enabled; + __u8 nata_syn_retries; + __u16 nata_data_retries; + __u32 nata_syn_rto_ms; + __u32 nata_data_rto_ms; }; struct tcp_repair_opt { diff --git a/net/Kconfig b/net/Kconfig index af7a95cec10eec191d041d918b07f2f3a7159a0c..b1b3e508e80e00637ba2ba6cc646cd32ea1ea357 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -459,12 +459,19 @@ config ETHTOOL_NETLINK source "net/newip/Kconfig" -config TCP_NB_URC - bool "NetiBooster Ultra-Reliable Connection Feature" +config TCP_NATA_URC + bool "Nata Ultra-Reliable Connection Feature" default n help - This option enables the NetiBooster Ultra-Reliable Connection feature. - When enabled, the kernel will include support for CONFIG_TCP_NB_URC. + This option enables the Nata Ultra-Reliable Connection feature. + When enabled, the kernel will include support for CONFIG_TCP_NATA_URC. + +config TCP_NATA_STL + bool "Nata Satellite Connection Feature" + default n + help + This option enables the Nata Satellite Connection feature. + When enabled, the kernel will include support for CONFIG_TCP_NATA_STL. 
config LOWPOWER_PROTOCOL bool "lowpower protocol" diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 942c6befc5e06ad73df62bd9ded67629e36d9a4c..ee2b3084968c77ea82d1e1a9a7b92dd3cad26627 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -460,11 +460,13 @@ void tcp_init_sock(struct sock *sk) sk_sockets_allocated_inc(sk); sk->sk_route_forced_caps = NETIF_F_GSO; -#ifdef CONFIG_TCP_NB_URC - icsk->icsk_nb_urc_enabled = 0; - icsk->icsk_nb_urc_rto = TCP_TIMEOUT_INIT; - tp->tcp_retries2 = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); -#endif /* CONFIG_TCP_NB_URC */ +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) + icsk->nata_retries_enabled = 0; + icsk->nata_retries_type = NATA_NA; + icsk->nata_syn_rto = TCP_TIMEOUT_INIT; + icsk->nata_data_rto = TCP_TIMEOUT_INIT; + tp->nata_data_retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); +#endif } EXPORT_SYMBOL(tcp_init_sock); @@ -2849,11 +2851,13 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_rto = TCP_TIMEOUT_INIT; icsk->icsk_rto_min = TCP_RTO_MIN; icsk->icsk_delack_max = TCP_DELACK_MAX; -#ifdef CONFIG_TCP_NB_URC - icsk->icsk_nb_urc_enabled = 0; - icsk->icsk_nb_urc_rto = TCP_TIMEOUT_INIT; - tp->tcp_retries2 = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); -#endif /* CONFIG_TCP_NB_URC */ +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) + icsk->nata_retries_enabled = 0; + icsk->nata_retries_type = NATA_NA; + icsk->nata_syn_rto = TCP_TIMEOUT_INIT; + icsk->nata_data_rto = TCP_TIMEOUT_INIT; + tp->nata_data_retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); +#endif tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd = TCP_INIT_CWND; tp->snd_cwnd_cnt = 0; @@ -3192,40 +3196,89 @@ int tcp_sock_set_keepcnt(struct sock *sk, int val) } EXPORT_SYMBOL(tcp_sock_set_keepcnt); -#ifdef CONFIG_TCP_NB_URC -#define NB_URC_RTO_MS_MIN 200 // 200ms -#define NB_URC_RTO_MS_MAX (120000) // 12s -#define NB_URC_RTO_MS_TO_HZ 1000 - -static int tcp_set_nb_urc(struct sock *sk, 
sockptr_t optval, int optlen) +#ifdef CONFIG_TCP_NATA_URC +#define NATA_URC_RTO_MS_MIN 200 // 200ms +#define NATA_URC_RTO_MS_MAX 120000 // 120s +#define NATA_URC_RTO_MS_TO_HZ 1000 +static int tcp_set_nata_urc(struct sock *sk, sockptr_t optval, int optlen) { - int err = 0; - struct tcp_nb_urc opt = {}; + int err = -EINVAL; + struct tcp_nata_urc opt = {}; struct inet_connection_sock *icsk = inet_csk(sk); - if (optlen != sizeof(struct tcp_nb_urc)) { - err = -EINVAL; + if (optlen != sizeof(struct tcp_nata_urc)) return err; - } - if (copy_from_sockptr(&opt, optval, sizeof(struct tcp_nb_urc))) { - err = -EINVAL; + if (copy_from_sockptr(&opt, optval, optlen)) return err; + + if (!opt.nata_urc_enabled) { + icsk->nata_retries_enabled = opt.nata_urc_enabled; + icsk->nata_retries_type = NATA_NA; + icsk->icsk_syn_retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syn_retries); + tcp_sk(sk)->nata_data_retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); + icsk->nata_syn_rto = TCP_TIMEOUT_INIT; + icsk->nata_data_rto = TCP_TIMEOUT_INIT; + return 0; } - if (opt.nb_urc_rto_ms < NB_URC_RTO_MS_MIN || opt.nb_urc_rto_ms > NB_URC_RTO_MS_MAX) { - err = -EINVAL; + if (opt.nata_rto_ms < NATA_URC_RTO_MS_MIN || + opt.nata_rto_ms > NATA_URC_RTO_MS_MAX ) return err; + + icsk->nata_retries_enabled = opt.nata_urc_enabled; + icsk->nata_retries_type = NATA_URC; + icsk->icsk_syn_retries = opt.nata_syn_retries; + tcp_sk(sk)->nata_data_retries = opt.nata_data_retries; + icsk->nata_data_rto = opt.nata_rto_ms * HZ / NATA_URC_RTO_MS_TO_HZ; + icsk->nata_syn_rto = icsk->nata_data_rto; + return 0; +} +#endif + +#ifdef CONFIG_TCP_NATA_STL +#define NATA_STL_SYN_RTO_MS_MIN 800 // 800ms +#define NATA_STL_DATA_RTO_MS_MIN 1800 // 1800ms +#define NATA_STL_RTO_MS_MAX 120000 // 120s +#define NATA_STL_RTO_MS_TO_HZ 1000 +static int tcp_set_nata_stl(struct sock *sk, sockptr_t optval, int optlen) +{ + int err = -EINVAL; + struct tcp_nata_stl opt = {}; + struct inet_connection_sock *icsk = inet_csk(sk); + + if 
(optlen != sizeof(struct tcp_nata_stl)) + return err; + + if (copy_from_sockptr(&opt, optval, optlen)) + return err; + + if (!opt.nata_stl_enabled) { + icsk->nata_retries_enabled = opt.nata_stl_enabled; + icsk->nata_retries_type = NATA_NA; + icsk->icsk_syn_retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syn_retries); + tcp_sk(sk)->nata_data_retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); + icsk->nata_syn_rto = TCP_TIMEOUT_INIT; + icsk->nata_data_rto = TCP_TIMEOUT_INIT; + return 0; } - icsk->icsk_syn_retries = opt.tcp_syn_retries; - tcp_sk(sk)->tcp_retries2 = opt.tcp_retries2; - icsk->icsk_nb_urc_enabled = opt.nb_urc_enabled; - icsk->icsk_nb_urc_rto = opt.nb_urc_rto_ms * HZ / NB_URC_RTO_MS_TO_HZ; + if ((opt.nata_syn_rto_ms < NATA_STL_SYN_RTO_MS_MIN || + opt.nata_syn_rto_ms > NATA_STL_RTO_MS_MAX || + opt.nata_data_rto_ms < NATA_STL_DATA_RTO_MS_MIN || + opt.nata_data_rto_ms > NATA_STL_RTO_MS_MAX)) + return err; - return err; + icsk->nata_retries_enabled = opt.nata_stl_enabled; + icsk->nata_retries_type = NATA_STL; + icsk->icsk_syn_retries = opt.nata_syn_retries; + tcp_sk(sk)->nata_data_retries = opt.nata_data_retries; + icsk->nata_syn_rto = opt.nata_syn_rto_ms * HZ / NATA_STL_RTO_MS_TO_HZ; + icsk->nata_data_rto = opt.nata_data_rto_ms * HZ / NATA_STL_RTO_MS_TO_HZ; + return 0; } -#endif /* CONFIG_TCP_NB_URC */ +#endif + /* * Socket option code for TCP. 
*/ @@ -3532,11 +3585,16 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, tcp_enable_tx_delay(); WRITE_ONCE(tp->tcp_tx_delay, val); break; -#ifdef CONFIG_TCP_NB_URC - case TCP_NB_URC: - err = tcp_set_nb_urc(sk, optval, optlen); +#ifdef CONFIG_TCP_NATA_URC + case TCP_NATA_URC: + err = tcp_set_nata_urc(sk, optval, optlen); break; -#endif /* CONFIG_TCP_NB_URC */ +#endif +#ifdef CONFIG_TCP_NATA_STL + case TCP_NATA_STL: + err = tcp_set_nata_stl(sk, optval, optlen); + break; +#endif default: err = -ENOPROTOOPT; break; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 74f0f9eb07afd53f74597e3306f7aeaf655a267c..ddfe2ba3ea5d0dc29a2a3a3b0a83eadee6c9f72e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -4122,11 +4122,11 @@ void tcp_send_probe0(struct sock *sk) icsk->icsk_probes_out++; if (err <= 0) { -#ifdef CONFIG_TCP_NB_URC +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) if (icsk->icsk_backoff < tcp_get_retries_limit(sk)) #else if (icsk->icsk_backoff < READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2)) -#endif /* CONFIG_TCP_NB_URC */ +#endif icsk->icsk_backoff++; timeout = tcp_probe0_when(sk, TCP_RTO_MAX); } else { diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index eb08d62994e21b94767d3af2f21d6fd7ec3a0f42..bf4d38b524c4c1b50ab2784a546b1551d382f2bd 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -250,11 +250,11 @@ static int tcp_write_timeout(struct sock *sk) __dst_negative_advice(sk); } -#ifdef CONFIG_TCP_NB_URC +#if defined(CONFIG_TCP_NATA_URC) || defined(CONFIG_TCP_NATA_STL) retry_until = tcp_get_retries_limit(sk); #else retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2); -#endif /* CONFIG_TCP_NB_URC */ +#endif if (sock_flag(sk, SOCK_DEAD)) { const bool alive = icsk->icsk_rto < TCP_RTO_MAX; @@ -385,11 +385,11 @@ static void tcp_probe_timer(struct sock *sk) msecs_to_jiffies(icsk->icsk_user_timeout)) goto abort; -#ifdef CONFIG_TCP_NB_URC +#if defined(CONFIG_TCP_NATA_URC) 
|| defined(CONFIG_TCP_NATA_STL) max_probes = tcp_get_retries_limit(sk); #else max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); -#endif /* CONFIG_TCP_NB_URC */ +#endif if (sock_flag(sk, SOCK_DEAD)) { const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;