diff --git a/code/linux/net/newip/af_ninet.c b/code/linux/net/newip/af_ninet.c
index 9c5226429a599de9218eb8ea7013b0ef144a73aa..b2fa2930a83f6fdf4d88e88c74943b92ce1a9248 100644
--- a/code/linux/net/newip/af_ninet.c
+++ b/code/linux/net/newip/af_ninet.c
@@ -206,7 +206,7 @@ int ninet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		return -EACCES;
 
 	if (nip_bind_addr_check(net, &addr->sin_addr) == false) {
-		DEBUG("%s: binding-addr invalid.", __func__);
+		DEBUG("%s: binding-addr invalid, bitlen=%u.", __func__, addr->sin_addr.bitlen);
 		return -EADDRNOTAVAIL;
 	}
 	lock_sock(sk);
diff --git a/code/linux/net/newip/nip_addrconf.c b/code/linux/net/newip/nip_addrconf.c
index 46db1b164f10ba8231d16ca4f36f4fdbaab07b1f..69c3bb9577bf43cd18d2b7edef18e9bcc67f7dd5 100644
--- a/code/linux/net/newip/nip_addrconf.c
+++ b/code/linux/net/newip/nip_addrconf.c
@@ -398,7 +398,7 @@ int nip_addrconf_add_ifaddr(struct net *net, void __user *arg)
 	}
 
 	if (nip_addr_invalid(&ireq.ifrn_addr)) {
-		DEBUG("%s: nip addr invalid.", __func__);
+		DEBUG("%s: nip addr invalid, bitlen=%u.", __func__, ireq.ifrn_addr.bitlen);
 		return -EFAULT;
 	}
 
@@ -431,7 +431,7 @@ int nip_addrconf_del_ifaddr(struct net *net, void __user *arg)
 	}
 
 	if (nip_addr_invalid(&ireq.ifrn_addr)) {
-		DEBUG("%s: nip addr invalid.", __func__);
+		DEBUG("%s: nip addr invalid, bitlen=%u.", __func__, ireq.ifrn_addr.bitlen);
 		return -EFAULT;
 	}
 
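Note on the bitlen additions: every invalid-address message in this patch now reports the
address's bitlen, which suggests the validity check is driven largely by that field. A
standalone sketch of such a predicate, assuming only what the patch context shows (the
nip_addr_invalid() name and the .bitlen field); the bounds and payload size below are
invented for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of nip_addr_invalid(): a NewIP address carries an
 * explicit bit length, so a malformed address is detectable from bitlen
 * alone. The 64-bit ceiling and byte-alignment rule are invented here;
 * only the field name comes from the patch.
 */
struct nip_addr_sketch {
	uint8_t bitlen;	/* address length in bits */
	uint8_t v[8];	/* address payload, sized arbitrarily here */
};

static bool nip_addr_invalid_sketch(const struct nip_addr_sketch *a)
{
	return a->bitlen == 0 || a->bitlen > 64 || (a->bitlen & 7) != 0;
}
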
diff --git a/code/linux/net/newip/nip_input.c b/code/linux/net/newip/nip_input.c
index 8a1120b3d10943287aaa0b5ff208e72446006d36..5924887061e7300d6b293189ebb905de7e70a087 100644
--- a/code/linux/net/newip/nip_input.c
+++ b/code/linux/net/newip/nip_input.c
@@ -50,7 +50,7 @@ static int nip_rcv_finish(struct sk_buff *skb)
 	if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
 		const struct ninet_protocol *ipprot;
 
-		DEBUG("%s: try to early demux skb.\n", __func__);
+		DEBUG("%s: try to early demux skb.", __func__);
 		ipprot = rcu_dereference(ninet_protos[NIPCB(skb)->nexthdr]);
 		if (ipprot)
 			edemux = READ_ONCE(ipprot->early_demux);
diff --git a/code/linux/net/newip/nip_output.c b/code/linux/net/newip/nip_output.c
index ff0f0414a4d59aae7fb384bd29dba3e442ae98f4..a108419f431955253370be9e1a773be5f814305d 100644
--- a/code/linux/net/newip/nip_output.c
+++ b/code/linux/net/newip/nip_output.c
@@ -438,7 +438,7 @@ void tcp_nip_actual_send_reset(struct sock *sk, struct sk_buff *skb, u32 seq,
 	/* alloc skb */
 	buff = alloc_skb(MAX_TCP_HEADER, priority);
 	if (!buff) {
-		DEBUG("%s: alloc_skb failed.\n", __func__);
+		DEBUG("%s: alloc_skb failed.", __func__);
 		return;
 	}
 	skb_reserve(buff, MAX_TCP_HEADER);
@@ -460,9 +460,9 @@ void tcp_nip_actual_send_reset(struct sock *sk, struct sk_buff *skb, u32 seq,
 	t1->rst = rst;
 	t1->window = htons(win);
 	t1->check = htons(nip_get_output_checksum_tcp(buff, *saddr, *daddr));
-	DEBUG("%s: host dport==%u, net dport==%x, host sport==%u, net sport==%x\n",
+	DEBUG("%s: host dport==%u, net dport==0x%x, host sport==%u, net sport==0x%x",
 	      __func__, ntohs(t1->dest), t1->dest, ntohs(t1->source), t1->source);
-	DEBUG("%s: host seq==%u, net seq==%x, host ack_seq==%u, net ack_seq==%x\n",
+	DEBUG("%s: host seq==%u, net seq==0x%x, host ack_seq==%u, net ack_seq==0x%x",
 	      __func__, seq, t1->seq, ack_seq, t1->ack_seq);
 
 	buff->protocol = htons(ETH_P_NEWIP);
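The reworked RST trace in tcp_nip_actual_send_reset() prints each port and sequence number
twice, once converted to host order and once raw, with the raw value now consistently
prefixed 0x. A userspace illustration of why that pairing is useful, using only the
standard byte-order helpers:

#include <stdio.h>
#include <arpa/inet.h>	/* htons, ntohs */

int main(void)
{
	/* TCP headers store ports in network byte order. Printing both
	 * forms, as the patched DEBUG lines do, makes a missing ntohs()
	 * obvious: on a little-endian host, port 8080 (0x1f90) reads back
	 * as 0x901f raw but 8080 after conversion.
	 */
	unsigned short net_port = htons(8080);

	printf("host port=%u, net port=0x%x\n",
	       (unsigned int)ntohs(net_port), (unsigned int)net_port);
	return 0;
}
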
diff --git a/code/linux/net/newip/nndisc.c b/code/linux/net/newip/nndisc.c
index 02ac3e657b1c53ba9c0e14fc20dfa75f67a2819e..e365b98bf8cffda93b7c8f7a40ac958554f772ca 100644
--- a/code/linux/net/newip/nndisc.c
+++ b/code/linux/net/newip/nndisc.c
@@ -508,7 +508,7 @@ int nndisc_rcv_ns(struct sk_buff *skb)
 	}
 
 	if (nip_addr_invalid(&addr)) {
-		DEBUG("%s: icmp hdr addr invalid.", __func__);
+		DEBUG("%s: icmp hdr addr invalid, bitlen=%u.", __func__, addr.bitlen);
 		err = -EFAULT;
 		goto out;
 	}
diff --git a/code/linux/net/newip/route.c b/code/linux/net/newip/route.c
index dc926ca6554e679bdd2583863ba081d458f20382..5f637af173fa7a976c49fca260468f82f7259eea 100644
--- a/code/linux/net/newip/route.c
+++ b/code/linux/net/newip/route.c
@@ -264,7 +264,7 @@ void nip_route_input(struct sk_buff *skb)
 	};
 
 	if (nip_addr_eq(&fln.daddr, &nip_broadcast_addr_arp)) {
-		DEBUG("%s: recv broadcast packet!\n", __func__);
+		DEBUG("%s: recv broadcast packet!", __func__);
 		dst_hold(&net->newip.nip_broadcast_entry->dst);
 		skb_dst_set(skb,
 			    (struct dst_entry *)net->newip.nip_broadcast_entry);
@@ -337,11 +337,11 @@ struct nip_rt_info *nip_pol_route(struct net *net, struct nip_fib_table *table,
 	rt->dst.__use++;
 	pcpu_rt = nip_rt_get_pcpu_route(rt);
-	DEBUG("%s: cpu id = %d\n", __func__, smp_processor_id());
+	DEBUG("%s: cpu id = %d", __func__, smp_processor_id());
 
 	if (pcpu_rt) {
 		rcu_read_unlock_bh();
-		DEBUG("%s: pcpu found!\n", __func__);
+		DEBUG("%s: pcpu found!", __func__);
 	} else {
 		dst_hold(&rt->dst);
 		rcu_read_unlock_bh();
@@ -349,7 +349,7 @@ struct nip_rt_info *nip_pol_route(struct net *net, struct nip_fib_table *table,
 		dst_release(&rt->dst);
 	}
 
-	DEBUG("%s: rt dst.__refcnt = %d ; pcpu dst.__refcnt = %d\n", __func__,
+	DEBUG("%s: rt dst.__refcnt = %d ; pcpu dst.__refcnt = %d", __func__,
 	      atomic_read(&rt->dst.__refcnt),
 	      atomic_read(&pcpu_rt->dst.__refcnt));
 	return pcpu_rt;
@@ -362,7 +362,7 @@ bool nip_bind_addr_check(struct net *net,
 	struct nip_fib_table *fib_tbl = net->newip.nip_fib_local_tbl;
 
 	if (nip_addr_invalid(addr)) {
-		DEBUG("%s: binding-addr invalid.", __func__);
+		DEBUG("%s: binding-addr invalid, bitlen=%u.", __func__, addr->bitlen);
 		return false;
 	}
 
@@ -571,13 +571,15 @@ int nip_route_ioctl(struct net *net, unsigned int cmd, struct nip_rtmsg *rtmsg)
 	rtmsg_to_fibni_config(net, rtmsg, &cfg);
 	if (nip_addr_invalid(&cfg.fc_dst)) {
-		DEBUG("%s: nip daddr invalid.", __func__);
+		DEBUG("%s: nip daddr invalid, bitlen=%u.",
+		      __func__, cfg.fc_dst.bitlen);
 		return -EFAULT;
 	}
 
 	if (cfg.fc_flags & RTF_GATEWAY) {
 		if (nip_addr_invalid(&cfg.fc_gateway)) {
-			DEBUG("%s: nip gateway daddr invalid.", __func__);
+			DEBUG("%s: nip gateway daddr invalid, bitlen=%u.",
+			      __func__, cfg.fc_gateway.bitlen);
 			return -EFAULT;
 		}
 	}
@@ -616,7 +618,7 @@ static void nip_dst_destroy(struct dst_entry *dst)
 	}
 
 	if (from) {
-		DEBUG("%s: from->__refcnt = %d\n", __func__,
+		DEBUG("%s: from->__refcnt = %d", __func__,
 		      atomic_read(&from->__refcnt));
 	}
 	rt->from = NULL;
diff --git a/code/linux/net/newip/tcp_nip.c b/code/linux/net/newip/tcp_nip.c
index bb0b79eeebb332dbbf62fffd366f0e5296dc7aa4..7333d4e1af205748c62775ab2f361e5790283286 100644
--- a/code/linux/net/newip/tcp_nip.c
+++ b/code/linux/net/newip/tcp_nip.c
@@ -165,6 +165,7 @@ void tcp_nip_close(struct sock *sk, long timeout)
 	struct sk_buff *skb;
 	int data_was_unread = 0;
 	int state;
+	u32 sk_ack_backlog;
 
 	lock_sock(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;
@@ -174,8 +175,11 @@ void tcp_nip_close(struct sock *sk, long timeout)
 
 	if (sk->sk_state == TCP_LISTEN) {
 		tcp_set_state(sk, TCP_CLOSE);
+		sk_ack_backlog = READ_ONCE(sk->sk_ack_backlog);
 		inet_csk_listen_stop(sk);
-
+		DEBUG("%s: sk_state CLOSE, sk_ack_backlog=%u to %u, sk_max_ack_backlog=%u",
+		      __func__, sk_ack_backlog, READ_ONCE(sk->sk_ack_backlog),
+		      READ_ONCE(sk->sk_max_ack_backlog));
 		goto adjudge_to_death;
 	}
 
@@ -202,7 +206,7 @@ void tcp_nip_close(struct sock *sk, long timeout)
 	 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
 	 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
 	 */
-		DEBUG("%s: ready to send fin, sk_state:%d\n", __func__, sk->sk_state);
+		DEBUG("%s: ready to send fin, sk_state:%d", __func__, sk->sk_state);
 		tcp_nip_send_fin(sk);
 	}
 
@@ -326,7 +330,7 @@ static int tcp_nip_connect(struct sock *sk, struct sockaddr *uaddr,
 	fln.flowin_oif = sk->sk_bound_dev_if;
 	dst = nip_dst_lookup_flow(sock_net(sk), sk, &fln, NULL);
 	if (IS_ERR(dst)) {
-		DEBUG("%s cannot find dst\n", __func__);
+		DEBUG("%s cannot find dst", __func__);
 		err = PTR_ERR(dst);
 		goto failure;
 	}
@@ -337,13 +341,13 @@ static int tcp_nip_connect(struct sock *sk, struct sockaddr *uaddr,
 	fln.saddr = sk->sk_nip_rcv_saddr;
 
 	if (nip_addr_invalid(&fln.daddr)) {
-		DEBUG("%s: nip daddr invalid.", __func__);
+		DEBUG("%s: nip daddr invalid, bitlen=%u.", __func__, fln.daddr.bitlen);
 		err = -EFAULT;
 		goto failure;
 	}
 
 	if (nip_addr_invalid(&fln.saddr)) {
-		DEBUG("%s: nip saddr invalid.", __func__);
+		DEBUG("%s: nip saddr invalid, bitlen=%u.", __func__, fln.saddr.bitlen);
 		err = -EFAULT;
 		goto failure;
 	}
@@ -409,7 +413,7 @@ static void tcp_nip_send_reset(struct sock *sk, struct sk_buff *skb)
 	if (th->rst)
 		return;
 
-	DEBUG("%s: send RST!\n", __func__);
+	DEBUG("%s: send RST!", __func__);
 
 	if (th->ack)
 		seq = ntohl(th->ack_seq);
@@ -633,7 +637,7 @@ static int tcp_nip_keepalive_para_update(struct sock *sk,
 	/* set keep idle (TCP_KEEPIDLE) */
 	val = keepalive_time;
 	if (val < 1 || val > MAX_NIP_TCP_KEEPIDLE) {
-		pr_crit("%s keepalive_time(%u) invalid.", __func__, val);
+		DEBUG("%s keepalive_time(%u) invalid.", __func__, val);
 		return -EINVAL;
 	}
 
@@ -652,7 +656,7 @@ static int tcp_nip_keepalive_para_update(struct sock *sk,
 	/* set keep intvl (TCP_KEEPINTVL) */
 	val = keepalive_intvl;
 	if (val < 1 || val > MAX_NIP_TCP_KEEPINTVL) {
-		pr_crit("%s keepalive_intvl(%u) invalid.", __func__, val);
+		DEBUG("%s keepalive_intvl(%u) invalid.", __func__, val);
 		return -EINVAL;
 	}
 	tp->keepalive_intvl = val;
@@ -660,7 +664,7 @@ static int tcp_nip_keepalive_para_update(struct sock *sk,
 	/* set keep cnt (TCP_KEEPCNT) */
 	val = keepalive_probes;
 	if (val < 1 || val > MAX_NIP_TCP_KEEPCNT) {
-		pr_crit("%s keepalive_probes(%u) invalid.", __func__, val);
+		DEBUG("%s keepalive_probes(%u) invalid.", __func__, val);
 		return -EINVAL;
 	}
 	tp->keepalive_probes = val;
@@ -670,7 +674,7 @@ static int tcp_nip_keepalive_para_update(struct sock *sk,
 		sk->sk_prot->keepalive(sk, 1);
 		sock_valbool_flag(sk, SOCK_KEEPOPEN, 1);
 	} else {
-		pr_crit("%s keepalive func is null.", __func__);
+		DEBUG("%s keepalive func is null.", __func__);
 	}
 
 	return 0;
@@ -688,11 +692,13 @@ void tcp_nip_keepalive_enable(struct sock *sk)
 				  g_nip_keepalive_intvl, g_nip_keepalive_probes);
 	if (ret != 0) {
-		pr_crit("%s fail", __func__);
+		DEBUG("%s fail, ka_time=%u, ka_probes=%u, ka_intvl=%u", __func__,
+		      tp->keepalive_time, tp->keepalive_probes, tp->keepalive_intvl);
 		return;
 	}
 
-	pr_crit("%s ok", __func__);
+	DEBUG("%s ok, ka_time=%u, ka_probes=%u, ka_intvl=%u", __func__,
+	      tp->keepalive_time, tp->keepalive_probes, tp->keepalive_intvl);
 	tp->nip_keepalive_enable = true;
 }
 
@@ -711,7 +717,7 @@ void tcp_nip_keepalive_disable(struct sock *sk)
 	sk->sk_prot->keepalive(sk, 0);
 	sock_valbool_flag(sk, SOCK_KEEPOPEN, 0);
 
-	pr_crit("%s ok, idle_ka_probes_out=%u", __func__, g_nip_idle_ka_probes_out);
+	DEBUG("%s ok, idle_ka_probes_out=%u", __func__, g_nip_idle_ka_probes_out);
 	tp->nip_keepalive_enable = false;
 }
 
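tcp_nip_keepalive_para_update() validates and applies the same three knobs that userspace
reaches through the standard keepalive socket options. For comparison, the equivalent
configuration of an ordinary TCP socket; portable API, nothing NewIP-specific, and the
values are examples only:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Enable SO_KEEPALIVE, then set idle time, probe interval and probe
 * count, mirroring what tcp_nip_keepalive_para_update() does in-kernel.
 */
static int set_keepalive(int fd)
{
	int on = 1, idle = 30, intvl = 5, cnt = 3;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}
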
@@ -860,7 +866,7 @@ int tcp_nip_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 restart:
 	mss_now = tcp_nip_send_mss(sk, &size_goal, flags);
-	DEBUG("%s: tcp_nip_send_mss %d\n", __func__, mss_now);
+	DEBUG("%s: tcp_nip_send_mss %d", __func__, mss_now);
 
 	err = -EPIPE;
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
@@ -902,7 +908,7 @@ restart:
 				if (err)
 					goto do_fault;
 			} else {
-				DEBUG("%s: msg too big! tcp cannot devide packet now\n", __func__);
+				DEBUG("%s: msg too big! tcp cannot divide packet now", __func__);
 				goto out;
 			}
 
@@ -973,7 +979,7 @@ void tcp_nip_cleanup_rbuf(struct sock *sk, int copied)
 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
 		WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
-		     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+		     "cleanup rbuf bug: copied %X seq %X rcvnxt %X",
 		     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 
 	if (inet_csk_ack_scheduled(sk)) {
@@ -1055,13 +1061,13 @@ int tcp_nip_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonbloc
 			 * shouldn't happen.
 			 */
 			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
-				 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
+				 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X",
 				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags))
 				break;
 
 			offset = *seq - TCP_SKB_CB(skb)->seq;
 			if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
-				pr_err_once("%s: found a SYN, please report !\n", __func__);
+				pr_err_once("%s: found a SYN, please report !", __func__);
 				offset--;
 			}
 			if (offset < skb->len)
@@ -1072,7 +1078,7 @@ int tcp_nip_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonbloc
 			 * be replicated, then MSG_PEEK should be set in flags
 			 */
 			WARN(!(flags & MSG_PEEK),
-			     "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
+			     "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X",
 			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
 		}
 
@@ -1129,7 +1135,7 @@ int tcp_nip_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonbloc
 				release_sock(sk);
 				lock_sock(sk);
 			} else {
-				DEBUG("%s: no enough data receive queue, wait\n", __func__);
+				DEBUG("%s: not enough data in receive queue, wait", __func__);
 				sk_wait_data(sk, &timeo, last);
 			}
 			continue;
@@ -1137,11 +1143,11 @@ found_ok_skb:
 		used = skb->len - offset;
 		if (len_tmp < used)
 			used = len_tmp;
-		DEBUG("%s: copy data into msg, len=%ld\n", __func__, used);
+		DEBUG("%s: copy data into msg, len=%ld", __func__, used);
 		if (!(flags & MSG_TRUNC)) {
 			err = skb_copy_datagram_msg(skb, offset, msg, used);
 			if (err) {
-				DEBUG("%s: copy data failed!\n", __func__);
+				DEBUG("%s: copy data failed!", __func__);
 				if (!copied)
 					copied = -EFAULT;
 				break;
@@ -1217,7 +1223,7 @@ void tcp_nip_destroy_sock(struct sock *sk)
  */
 static int tcp_nip_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
-	DEBUG("%s: received newip tcp skb, sk_state=%d\n", __func__, sk->sk_state);
+	DEBUG("%s: received newip tcp skb, sk_state=%d", __func__, sk->sk_state);
 
 	if (sk->sk_state == TCP_ESTABLISHED) {
 		tcp_nip_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
@@ -1226,7 +1232,7 @@ static int tcp_nip_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	/* The connection is established in cookie mode to defend against SYN-flood attacks */
 	if (sk->sk_state == TCP_LISTEN)
-		DEBUG("found TCP_LISTEN SOCK!!!\n");
+		DEBUG("found TCP_LISTEN SOCK!!!");
 
 	if (tcp_nip_rcv_state_process(sk, skb))
 		goto discard;
@@ -1281,7 +1287,7 @@ static bool tcp_nip_add_backlog(struct sock *sk, struct sk_buff *skb)
 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
 		bh_unlock_sock(sk);
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
-		DEBUG("%s: insert backlog fail.\n", __func__);
+		DEBUG("%s: insert backlog fail.", __func__);
 		return true;
 	}
 	return false;
@@ -1371,7 +1377,7 @@ static int tcp_nip_rcv(struct sk_buff *skb)
 	if (!sock_owned_by_user(sk)) {
 		ret = tcp_nip_do_rcv(sk, skb);
 	} else {
-		DEBUG("%s: sock locked by user! put packet into backlog\n",
+		DEBUG("%s: sock locked by user! put packet into backlog",
 		      __func__);
 		if (tcp_nip_add_backlog(sk, skb))
 			goto discard_and_relse;
@@ -1392,7 +1398,7 @@ no_tcp_socket:
 bad_packet:
 	goto discard_it;
 
 discard_it:
-	DEBUG("%s: drop tcp newip skb and release it\n", __func__);
+	DEBUG("%s: drop tcp newip skb and release it", __func__);
 	kfree_skb(skb);
 	return 0;
 
@@ -1473,7 +1479,7 @@ void tcp_nip_done(struct sock *sk)
 		this_cpu_dec(*sk->sk_prot->orphan_count);
 		local_bh_enable();
 		sock_put(sk);
-		DEBUG("%s: close sock done!!\n", __func__);
+		DEBUG("%s: close sock done!!", __func__);
 	}
 }
 
@@ -1490,12 +1496,17 @@ int tcp_nip_disconnect(struct sock *sk, int flags)
 	struct tcp_sock *tp = tcp_sk(sk);
 	int err = 0;
 	int old_state = sk->sk_state;
+	u32 sk_ack_backlog;
 
 	if (old_state != TCP_CLOSE)
 		tcp_set_state(sk, TCP_CLOSE);
 
 	if (old_state == TCP_LISTEN) {
+		sk_ack_backlog = READ_ONCE(sk->sk_ack_backlog);
 		inet_csk_listen_stop(sk);
+		DEBUG("%s: sk_state CLOSE, sk_ack_backlog=%u to %u, sk_max_ack_backlog=%u",
+		      __func__, sk_ack_backlog, READ_ONCE(sk->sk_ack_backlog),
+		      READ_ONCE(sk->sk_max_ack_backlog));
 	} else if (tcp_nip_need_reset(old_state) ||
 		   (tp->snd_nxt != tp->write_seq &&
 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -1552,13 +1563,26 @@ int tcp_nip_disconnect(struct sock *sk, int flags)
 	return err;
 }
 
+struct sock *ninet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
+{
+	struct sock *newsk;
+	u32 sk_ack_backlog_last = READ_ONCE(sk->sk_ack_backlog);
+	u32 sk_max_ack_backlog = READ_ONCE(sk->sk_max_ack_backlog);
+
+	newsk = inet_csk_accept(sk, flags, err, kern);
+	DEBUG("%s: accept %s, sk_ack_backlog_last=%u, sk_max_ack_backlog=%u",
+	      __func__, (newsk ? "ok" : "fail"), sk_ack_backlog_last, sk_max_ack_backlog);
+
+	return newsk;
+}
+
 struct proto tcp_nip_prot = {
 	.name		= "NIP_TCP",
 	.owner		= THIS_MODULE,
 	.close		= tcp_nip_close,
 	.connect	= tcp_nip_connect,
 	.disconnect	= tcp_nip_disconnect,
-	.accept		= inet_csk_accept,
+	.accept		= ninet_csk_accept,
 	.ioctl		= tcp_ioctl,
 	.init		= tcp_nip_init_sock,
 	.destroy	= tcp_nip_destroy_sock,
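The new ninet_csk_accept() wrapper adds nothing but logging around inet_csk_accept():
sk_max_ack_backlog is the ceiling that listen() installed, and sk_ack_backlog counts
established connections still waiting in the accept queue, so the logged pair shows the
queue draining. The userspace view of those two counters (shown with AF_INET, since the
AF_NINET constant is specific to this tree):

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in sa;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_port = htons(9000);
	sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("bind");
		return 1;
	}

	/* The backlog argument becomes sk_max_ack_backlog; every
	 * established-but-unaccepted connection bumps sk_ack_backlog,
	 * and each accept() drains one entry, which is exactly the
	 * before/after delta the patched functions log.
	 */
	listen(fd, 128);
	accept(fd, NULL, NULL);
	return 0;
}
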
diff --git a/code/linux/net/newip/tcp_nip_input.c b/code/linux/net/newip/tcp_nip_input.c
index dc9752b88bd76ec88bbdc44de5f2f2385a77fead..ee7271b2b90d388eddd5d96b1b2aeeb6d3d3f73f 100644
--- a/code/linux/net/newip/tcp_nip_input.c
+++ b/code/linux/net/newip/tcp_nip_input.c
@@ -97,7 +97,7 @@ void tcp_nip_fin(struct sock *sk)
 		/* Only TCP_LISTEN and TCP_CLOSE are left, in these
 		 * cases we should never reach this piece of code.
 		 */
-		pr_err("%s: Impossible, sk->sk_state=%d\n",
+		pr_err("%s: Impossible, sk->sk_state=%d",
 		       __func__, sk->sk_state);
 		break;
 	}
@@ -219,7 +219,7 @@ static void tcp_nip_data_queue(struct sock *sk, struct sk_buff *skb)
 	tp->snd_up = tp->snd_up > PKT_DISCARD_MAX ? 0 : tp->snd_up;
 
 	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
-		DEBUG("%s: no data, only handle ack.\n", __func__);
+		DEBUG("%s: no data, only handle ack.", __func__);
 		__kfree_skb(skb);
 		return;
 	}
@@ -230,7 +230,7 @@ static void tcp_nip_data_queue(struct sock *sk, struct sk_buff *skb)
 	}
 
 	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_wup + tp->rcv_wnd)) {
-		DEBUG("seq is %u and %u\n", TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
+		DEBUG("seq is %u and %u", TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
 		__kfree_skb(skb);
 		return;
 	}
@@ -256,7 +256,7 @@ out_of_window:
 	if (TCP_SKB_CB(skb)->seq != tp->rcv_nxt)
 		tcp_nip_overlap_handle(tp, skb);
 
-	DEBUG("%s: tcp newip packet received. data len:%d\n", __func__, skb->len);
+	DEBUG("%s: tcp newip packet received. data len:%d", __func__, skb->len);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	skb_set_owner_r(skb, sk);
 
@@ -365,7 +365,7 @@ static inline void tcp_nip_ack_snd_check(struct sock *sk)
 {
 	if (!inet_csk_ack_scheduled(sk)) {
 		/* We sent a data segment already. */
-		DEBUG("We sent a data segment already.!!\n");
+		DEBUG("We sent a data segment already.");
 		return;
 	}
 	__tcp_nip_ack_snd_check(sk, 1);
@@ -408,7 +408,7 @@ static int tcp_nip_clean_rtx_queue(struct sock *sk, ktime_t *skb_snd_tstamp)
 		if (after(scb->end_seq, tp->snd_una)) {
 			if (tcp_skb_pcount(skb) == 1 || !after(tp->snd_una, scb->seq))
 				break;
-			DEBUG("%s: ack error!\n", __func__);
+			DEBUG("%s: ack error!", __func__);
 		} else {
 			prefetchw(skb->next);
 			acked_pcount = tcp_skb_pcount(skb);
@@ -486,7 +486,7 @@ void tcp_nip_parse_mss(struct tcp_options_received *opt_rx,
 	if (opsize == TCPOLEN_MSS && th->syn && !estab) {
 		u16 in_mss = get_unaligned_be16(ptr);
 
-		DEBUG("%s: in_mss %d\n", __func__, in_mss);
+		DEBUG("%s: in_mss %d", __func__, in_mss);
 		if (in_mss) {
 			if (opt_rx->user_mss &&
@@ -763,7 +763,7 @@ int tcp_newip_conn_request(struct request_sock_ops *rsk_ops,
 	 * the current request is discarded
 	 */
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		DEBUG("inet_csk_reqsk_queue_is_full!!!!!\n");
+		DEBUG("inet_csk_reqsk_queue_is_full!!!!!");
 		goto drop;
 	}
 
@@ -773,7 +773,8 @@ int tcp_newip_conn_request(struct request_sock_ops *rsk_ops,
 	 */
 	if (sk_acceptq_is_full(sk)) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-		DEBUG("sk_acceptq_is_full!!!!!\n");
+		DEBUG("%s sk_acceptq_is_full, sk_ack_backlog=%u, sk_max_ack_backlog=%u",
+		      __func__, READ_ONCE(sk->sk_ack_backlog), READ_ONCE(sk->sk_max_ack_backlog));
 		goto drop;
 	}
 
@@ -1112,7 +1113,7 @@ static int tcp_nip_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	flag |= tcp_nip_ack_update_window(sk, skb, ack, ack_seq);
 
 	if (!prior_packets) {
-		DEBUG("No prior pack and ack is %u\n", ack);
+		DEBUG("No prior pack and ack is %u", ack);
 		if (tcp_nip_send_head(sk))
 			tcp_nip_ack_probe(sk);
 	}
@@ -1189,7 +1190,7 @@ static void tcp_nip_send_dupack(struct sock *sk, const struct sk_buff *skb)
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 	}
-	DEBUG("[nip]%s send dupack!\n", __func__);
+	DEBUG("[nip]%s send dupack!", __func__);
 	tcp_nip_send_ack(sk);
 }
 
@@ -1216,7 +1217,7 @@ static bool tcp_nip_validate_incoming(struct sock *sk, struct sk_buff *skb,
 	 * unexpected packets do not need to be processed, but reply for an ACK
 	 */
 	if (!tcp_nip_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
-		DEBUG("%s receive an err seq and seq is %u, ack is %u\n", __func__,
+		DEBUG("%s receive an err seq and seq is %u, ack is %u", __func__,
 		      TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 		if (!th->rst)
 			tcp_nip_send_dupack(sk, skb);
@@ -1491,7 +1492,7 @@ int tcp_nip_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		}
 		goto discard;
 	case TCP_SYN_SENT:
-		DEBUG("%s TCP_SYN_SENT!!\n", __func__);
+		DEBUG("%s TCP_SYN_SENT!!", __func__);
 		tp->rx_opt.saw_tstamp = 0;
 		tcp_mstamp_refresh(tp);
 		queued = tcp_nip_rcv_synsent_state_process(sk, skb, th);
@@ -1527,7 +1528,7 @@ int tcp_nip_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		/* Invoke memory barrier (annotated prior to checkpatch requirements) */
 		smp_mb();
 		tcp_set_state(sk, TCP_ESTABLISHED);
-		DEBUG("TCP_ESTABLISHED!!!!!\n");
+		DEBUG("TCP_ESTABLISHED!!!!!");
 		sk->sk_state_change(sk);
 
 		/* Sets the part to be sent, and the size of the send window */
@@ -1541,7 +1542,7 @@ int tcp_nip_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		break;
 	case TCP_FIN_WAIT1: {
 		if (tp->snd_una != tp->write_seq) {
-			DEBUG("%s: tp->snd_una != tp->write_seq!!\n", __func__);
+			DEBUG("%s: tp->snd_una != tp->write_seq!!", __func__);
 			break;
 		}
 
@@ -1551,11 +1552,11 @@ int tcp_nip_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
 			tcp_nip_done(sk);
-			DEBUG("%s: received payload packets, call tcp_nip_done.\n", __func__);
+			DEBUG("%s: received payload packets, call tcp_nip_done.", __func__);
 			return 1;
 		}
 
-		DEBUG("%s: TCP_FIN_WAIT1: recvd ack for fin.Wait for fin from other side.\n",
+		DEBUG("%s: TCP_FIN_WAIT1: recvd ack for fin. Wait for fin from other side.",
 		      __func__);
 		inet_csk_reset_keepalive_timer(sk, TCP_NIP_CSK_KEEPALIVE_CYCLE * HZ);
 
@@ -1564,15 +1565,15 @@ int tcp_nip_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 
 	case TCP_CLOSING:
 		if (tp->snd_una == tp->write_seq) {
-			DEBUG("%s: TCP_CLOSING: recvd ack for fin.Ready to destroy.\n", __func__);
+			DEBUG("%s: TCP_CLOSING: recvd ack for fin. Ready to destroy.", __func__);
 			inet_csk_reset_keepalive_timer(sk, TCP_TIMEWAIT_LEN);
 			goto discard;
 		}
 		break;
 	case TCP_LAST_ACK:
-		DEBUG("tcp_nip_rcv_state_process_2: TCP_LAST_ACK\n");
+		DEBUG("tcp_nip_rcv_state_process_2: TCP_LAST_ACK");
 		if (tp->snd_una == tp->write_seq) {
-			DEBUG("%s: LAST_ACK: recvd ack for fin.Directly destroy.\n", __func__);
+			DEBUG("%s: LAST_ACK: recvd ack for fin. Directly destroy.", __func__);
 			tcp_nip_done(sk);
 			goto discard;
 		}
@@ -1581,16 +1582,16 @@ int tcp_nip_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 
 	switch (sk->sk_state) {
 	case TCP_CLOSE_WAIT:
-		DEBUG("%s: into TCP_CLOSE_WAIT, rst = %d, seq = %u, end_seq = %u, rcv_nxt = %u\n",
+		DEBUG("%s: into TCP_CLOSE_WAIT, rst = %d, seq = %u, end_seq = %u, rcv_nxt = %u",
 		      __func__, th->rst, TCP_SKB_CB(skb)->seq,
 		      TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
 		fallthrough;
 	case TCP_CLOSING:
 	case TCP_LAST_ACK:
 		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-			DEBUG("%s: break in TCP_LAST_ACK\n", __func__);
+			DEBUG("%s: break in TCP_LAST_ACK", __func__);
 			break;
 		}
-		DEBUG("tcp_nip_rcv_state_process_3: TCP_LAST_ACK_2\n");
+		DEBUG("tcp_nip_rcv_state_process_3: TCP_LAST_ACK_2");
 		fallthrough;
 	case TCP_FIN_WAIT1:
 	case TCP_FIN_WAIT2:
@@ -1601,7 +1602,7 @@ int tcp_nip_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
 			tcp_nip_reset(sk);
-			DEBUG("%s: call tcp_nip_reset\n", __func__);
+			DEBUG("%s: call tcp_nip_reset", __func__);
 			return 1;
 		}
 	}
diff --git a/code/linux/net/newip/tcp_nip_output.c b/code/linux/net/newip/tcp_nip_output.c
index e5287b61c629e66dea1099a1bc79f96151d2fd04..a216cff4dc3bc85fb9c4616530c38fc6a25fa520 100644
--- a/code/linux/net/newip/tcp_nip_output.c
+++ b/code/linux/net/newip/tcp_nip_output.c
@@ -144,7 +144,7 @@ void __tcp_nip_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 
 	if (tcp_nip_write_xmit(sk, cur_mss, nonagle, 0,
 			       sk_gfp_mask(sk, GFP_ATOMIC))) {
-		DEBUG("%s check probe0 timer!\n", __func__);
+		DEBUG("%s check probe0 timer!", __func__);
 		tcp_nip_check_probe_timer(sk);
 	}
 }
@@ -380,7 +380,7 @@ static unsigned int tcp_nip_syn_options(struct sock *sk, struct sk_buff *skb,
 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 
 	opts->mss = tcp_nip_advertise_mss(sk);
-	DEBUG("advertise mss%d", opts->mss);
+	DEBUG("advertise mss %d", opts->mss);
 	remaining -= TCPOLEN_MSS_ALIGNED;
 
 	return MAX_TCP_OPTION_SPACE - remaining;
@@ -506,9 +506,9 @@ static int __tcp_nip_transmit_skb(struct sock *sk, struct sk_buff *skb,
 	skb_set_hash_from_sk(skb, sk);
 	/* Increase allocated memory */
 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
-	DEBUG("th->inet_sport==%u, th->inet_dport==%u\n",
+	DEBUG("th->inet_sport=%u, th->inet_dport=%u",
 	      ntohs(inet->inet_sport), ntohs(inet->inet_dport));
-	DEBUG("sk->sk_rcvbuf==%d, sk->sk_rmem_alloc==%d\n",
+	DEBUG("sk->sk_rcvbuf=%d, sk->sk_rmem_alloc=%d",
 	      sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
 
 	/* Build TCP header and checksum it. */
 	th = (struct tcphdr *)skb->data;
@@ -634,9 +634,9 @@ unsigned int tcp_nip_sync_mss(struct sock *sk, u32 pmtu)
 		icsk->icsk_mtup.search_high = pmtu;
 
 	mss_now = tcp_nip_mtu_to_mss(sk, pmtu);
-	DEBUG("%s: sync mtu_to_mss %d\n", __func__, mss_now);
+	DEBUG("%s: sync mtu_to_mss %d", __func__, mss_now);
 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
-	DEBUG("%s: sync bound to half wnd %d\n", __func__, mss_now);
+	DEBUG("%s: sync bound to half wnd %d", __func__, mss_now);
 
 	/* And store cached results */
 	icsk->icsk_pmtu_cookie = pmtu;
@@ -644,7 +644,7 @@ unsigned int tcp_nip_sync_mss(struct sock *sk, u32 pmtu)
 	mss_now = min(mss_now,
 		      tcp_nip_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
 	tp->mss_cache = mss_now;
-	DEBUG("%s: sync final mss %d\n", __func__, mss_now);
+	DEBUG("%s: sync final mss %d", __func__, mss_now);
 
 	return mss_now;
 }
@@ -655,30 +655,25 @@ unsigned int tcp_nip_current_mss(struct sock *sk)
 	const struct dst_entry *dst = __sk_dst_get(sk);
 	u32 mss_now;
 	unsigned int header_len;
 	struct tcp_nip_out_options opts;
 
 	mss_now = tp->mss_cache;
-	DEBUG("%s: mss_cache %d\n", __func__, mss_now);
-
 	if (dst) {
 		u32 mtu = dst_mtu(dst);
 
 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
 			mss_now = tcp_nip_sync_mss(sk, mtu);
-		DEBUG("%s: mtu %d\n", __func__, mtu);
 	}
 
-	header_len = tcp_nip_established_options(sk, NULL, &opts) +
-		     sizeof(struct tcphdr);
-
+	header_len = tcp_nip_established_options(sk, NULL, &opts) + sizeof(struct tcphdr);
 	if (header_len != tp->tcp_header_len) {
 		int delta = (int)header_len - tp->tcp_header_len;
 
 		mss_now -= delta;
 	}
-	DEBUG("%s:after sync_mss%d\n", __func__, mss_now);
+
 	return mss_now;
 }
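The sync mtu_to_mss traces follow a plain subtraction: the cached MSS is the path MTU minus
the network and transport headers, then clamped. A hypothetical standalone version of that
arithmetic; NIP_HDR_LEN is a placeholder, since NewIP headers are variable-length and the
real tcp_nip_mtu_to_mss() computes the per-connection value:

#include <stdio.h>

/* Sketch of the mtu -> mss derivation that the sync-mss DEBUG lines
 * trace. Header sizes are invented for illustration only.
 */
#define NIP_HDR_LEN 12	/* placeholder NewIP header length */
#define TCP_HDR_LEN 20

static unsigned int mtu_to_mss_sketch(unsigned int mtu)
{
	return mtu - NIP_HDR_LEN - TCP_HDR_LEN;
}

int main(void)
{
	printf("mtu 1500 -> mss %u\n", mtu_to_mss_sketch(1500)); /* 1468 */
	return 0;
}
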
@@ -964,7 +958,7 @@ void tcp_nip_send_fin(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 cur_mss;
 
-	DEBUG("%s: send fin!\n", __func__);
+	DEBUG("%s: send fin!", __func__);
 	/* Set the fin position of the last packet to 1 */
 	if (tskb && tcp_nip_send_head(sk)) {
 coalesce:
@@ -993,11 +987,11 @@ void tcp_nip_send_active_reset(struct sock *sk, gfp_t priority)
 {
 	struct sk_buff *skb;
 
-	DEBUG("%s: send RST!\n", __func__);
+	DEBUG("%s: send RST!", __func__);
 	/* NOTE: No TCP options attached and we never retransmit this. */
 	skb = alloc_skb(MAX_TCP_HEADER, priority);
 	if (!skb) {
-		DEBUG("%s: alloc_skb failed.\n", __func__);
+		DEBUG("%s: alloc_skb failed.", __func__);
 		return;
 	}
 
 	/* Reserve space for headers and prepare control bits. */
@@ -1071,7 +1065,7 @@ static bool tcp_nip_write_xmit(struct sock *sk, unsigned int mss_now, int nonagl
 	}
 
 	while ((skb = tcp_nip_send_head(sk)) && (snd_num--)) {
-		DEBUG("%s:tcp_nip_send_head head found!\n", __func__);
+		DEBUG("%s: tcp_nip_send_head head found!", __func__);
 		tcp_nip_init_tso_segs(skb, mss_now);
 		if (unlikely(!tcp_nip_snd_wnd_test(tp, skb, mss_now)))
 			break;
@@ -1220,7 +1214,7 @@ static int tcp_nip_xmit_probe_skb(struct sock *sk, int urgent, int mib)
 	tcp_nip_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
 	NET_INC_STATS(sock_net(sk), mib);
 
-	DEBUG("[nip]%s: send probe packet!\n", __func__);
+	DEBUG("[nip]%s: send probe packet!", __func__);
 	return tcp_nip_transmit_skb(sk, skb, 0, (__force gfp_t)0);
 }
 
@@ -1247,7 +1241,7 @@ int tcp_nip_write_wakeup(struct sock *sk, int mib)
 			err = tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
 					   skb, seg_size, mss, GFP_ATOMIC);
 			if (err) {
-				DEBUG("[nip]:tcp_fragment return err = %d!\n", err);
+				DEBUG("[nip]: tcp_fragment return err = %d!", err);
 				return -1;
 			}
 		}
diff --git a/code/linux/net/newip/tcp_nip_parameter.c b/code/linux/net/newip/tcp_nip_parameter.c
index ccc64a608e0c9db90c0a367ffc8e183152e95262..b402e52f12fc6e708e10cb072d70f9bb3c00008b 100644
--- a/code/linux/net/newip/tcp_nip_parameter.c
+++ b/code/linux/net/newip/tcp_nip_parameter.c
@@ -88,7 +88,10 @@ module_param_named(dup_ack_retrans_num, g_dup_ack_retrans_num, int, 0644);
 
 int g_ack_retrans_num = 5;
 module_param_named(ack_retrans_num, g_ack_retrans_num, int, 0644);
 
-int g_dup_ack_snd_max = 6;
+/* Raised from 6 to fix sessions auto-closing when multithreaded stability
+ * tests disconnect individual sockets.
+ */
+int g_dup_ack_snd_max = 500;
 module_param_named(dup_ack_snd_max, g_dup_ack_snd_max, int, 0644);
 
/*********************************************************************************************/
@@ -164,7 +168,7 @@ int g_nip_tcp_zero_probe = 20;
 module_param_named(nip_tcp_zero_probe, g_nip_tcp_zero_probe, int, 0644);
 
 /*********************************************************************************************/
-/* window mode parameters                                                                    */
+/* window mode parameters */
 /*********************************************************************************************/
 bool g_nip_tcp_snd_win_enable;
 module_param_named(nip_tcp_snd_win_enable, g_nip_tcp_snd_win_enable, bool, 0644);
diff --git a/code/linux/net/newip/tcp_nip_parameter.h b/code/linux/net/newip/tcp_nip_parameter.h
index 3c7bc1177b475ec523258cd5dfaa8ffa7a7acfaf..f0ab28869290c5d8d8f721fbdc5033827d246953 100644
--- a/code/linux/net/newip/tcp_nip_parameter.h
+++ b/code/linux/net/newip/tcp_nip_parameter.h
@@ -96,7 +96,7 @@ extern int g_nip_keepalive_probes;
 extern int g_nip_tcp_zero_probe;
 
 /*********************************************************************************************/
-/* window mode parameters                                                                    */
+/* window mode parameters */
 /*********************************************************************************************/
 extern bool g_nip_tcp_snd_win_enable;
 extern bool g_nip_tcp_rcv_win_enable;
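Because these thresholds are registered with module_param_named(..., 0644), the 6-to-500
change could also be applied to a running system through sysfs rather than a rebuild. A
minimal sketch; the module directory name "newip" is an assumption about how this tree
packages the code:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* module_param_named(dup_ack_snd_max, ...) with mode 0644 exposes
	 * the knob under /sys/module/<module>/parameters/; "newip" as the
	 * module name is an assumption.
	 */
	const char *path = "/sys/module/newip/parameters/dup_ack_snd_max";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "500", strlen("500")) < 0)
		perror("write");
	close(fd);
	return 0;
}
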
diff --git a/code/linux/net/newip/tcp_nip_timer.c b/code/linux/net/newip/tcp_nip_timer.c
index 4f3c44cc182ae83e76b13d12206d7d8cc34cb0db..5c25ef13cb4a9880aeabed5f4b2143d5e59fbf0e 100644
--- a/code/linux/net/newip/tcp_nip_timer.c
+++ b/code/linux/net/newip/tcp_nip_timer.c
@@ -100,11 +100,12 @@ static bool retransmits_nip_timed_out(struct sock *sk,
 	 * Currently, it determines whether the timeout period is based on
 	 * the retransmission times
 	 */
-	DEBUG("%s: icsk->retransmits=%u\n", __func__,
-	      inet_csk(sk)->icsk_retransmits);
+	DEBUG("%s: icsk->retransmits=%u, boundary=%u", __func__,
+	      inet_csk(sk)->icsk_retransmits, boundary);
 	return inet_csk(sk)->icsk_retransmits > boundary;
 }
 
+#define NIP_RETRY_UNTIL 200 /* fix session auto close */
 static int tcp_nip_write_timeout(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -125,9 +126,12 @@ static int tcp_nip_write_timeout(struct sock *sk)
 		}
 	}
 
+#ifdef NIP_RETRY_UNTIL
+	retry_until = NIP_RETRY_UNTIL;
+#endif
 	if (retransmits_nip_timed_out(sk, retry_until,
 				      syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
-		DEBUG("%s: tcp retransmit time out!!!\n", __func__);
+		DEBUG("%s: tcp retransmit time out!!!", __func__);
 		tcp_nip_write_err(sk);
 		return 1;
 	}
@@ -179,30 +183,45 @@ void tcp_nip_retransmit_timer(struct sock *sk)
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
 				  TCP_RTO_MAX);
 }
 
+#define NIP_MAX_PROBES 2000 /* fix session auto close */
 void tcp_nip_probe_timer(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int max_probes;
+	int icsk_backoff;
+	int icsk_probes_out;
 
 	if (tp->packets_out || !tcp_nip_send_head(sk)) {
 		icsk->icsk_probes_out = 0;
-		DEBUG("[nip]%s packets_out!=0 or send_head=NULL, don't send probe packet.",
+		DEBUG("%s packets_out!=0 or send_head=NULL, don't send probe packet.",
 		      __func__);
 		return;
 	}
 
+#ifdef NIP_MAX_PROBES
+	max_probes = NIP_MAX_PROBES;
+#else
 	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
+#endif
+
 	if (sock_flag(sk, SOCK_DEAD)) {
 		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 
 		max_probes = tcp_nip_orphan_retries(sk, alive);
-		if (!alive && icsk->icsk_backoff >= max_probes)
+		if (!alive && icsk->icsk_backoff >= max_probes) {
+			DEBUG("%s will close session, icsk_backoff=%u, max_probes=%u",
+			      __func__, icsk->icsk_backoff, max_probes);
 			goto abort;
+		}
 	}
 
 	if (icsk->icsk_probes_out >= max_probes) {
-abort:		tcp_nip_write_err(sk);
+abort:		icsk_backoff = icsk->icsk_backoff;
+		icsk_probes_out = icsk->icsk_probes_out;
+		tcp_nip_write_err(sk);
+		DEBUG("%s close session, icsk_probes_out=%u, icsk_backoff=%u, max_probes=%u",
+		      __func__, icsk_probes_out, icsk_backoff, max_probes);
 	} else {
 		/* Only send another probe if we didn't close things up. */
 		tcp_nip_send_probe0(sk);
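Raising retry_until to 200 is significant because each retransmission doubles the RTO up
to its ceiling, so the boundary checked by retransmits_nip_timed_out() effectively caps
total dead time. A simplified model of that arithmetic, using the conventional 200 ms and
120 s floor and ceiling; the kernel's real computation differs in details:

#include <stdio.h>

/* Sum the exponentially backed-off RTOs preceding retransmission number
 * `boundary`, the quantity that retransmits_nip_timed_out() bounds.
 * 0.2 s / 120 s mirror the usual TCP_RTO_MIN / TCP_RTO_MAX.
 */
static double total_backoff_sec(unsigned int boundary)
{
	double rto = 0.2, total = 0.0;
	unsigned int i;

	for (i = 0; i < boundary; i++) {
		total += rto;
		rto = rto * 2 > 120.0 ? 120.0 : rto * 2;
	}
	return total;
}

int main(void)
{
	/* 15 retries (a common retries2 default) vs the patched 200 */
	printf("15 -> %.0f s, 200 -> %.0f s\n",
	       total_backoff_sec(15), total_backoff_sec(200));
	return 0;
}
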
@@ -267,7 +286,7 @@ static void tcp_nip_keepalive_timeout(struct sock *sk)
 	u32 keepalive_time = keepalive_time_when(tp);
 
 	if (keepalive_time > HZ) {
-		pr_crit("%s keepalive timeout, disconnect sock.", __func__);
+		DEBUG("%s keepalive timeout, disconnect sock.", __func__);
 		tcp_nip_write_err(sk);
 		return;
 	}
@@ -277,11 +296,11 @@ static void tcp_nip_keepalive_timeout(struct sock *sk)
 		icsk->icsk_probes_out = 0;
 		inet_csk_reset_keepalive_timer(sk, keepalive_time);
 
-		pr_crit("%s ms keepalive scale(%u) < thresh, connect sock continue.",
-			__func__, tp->nip_keepalive_timeout_scale);
+		DEBUG("%s ms keepalive scale(%u) < thresh, connect sock continue.",
+		      __func__, tp->nip_keepalive_timeout_scale);
 	} else {
-		pr_crit("%s ms keepalive timeout(scale=%u), disconnect sock.",
-			__func__, tp->nip_keepalive_timeout_scale);
+		DEBUG("%s ms keepalive timeout(scale=%u), disconnect sock.",
+		      __func__, tp->nip_keepalive_timeout_scale);
 		tcp_nip_write_err(sk);
 	}
 }
@@ -302,7 +321,7 @@ static void tcp_nip_keepalive_timer(struct timer_list *t)
 	}
 
 	if (sk->sk_state == TCP_LISTEN) {
-		pr_err("Hmm... keepalive on a LISTEN\n");
+		pr_err("Hmm... keepalive on a LISTEN");
 		goto out;
 	}
 
 	tcp_mstamp_refresh(tp);
@@ -312,7 +331,7 @@ static void tcp_nip_keepalive_timer(struct timer_list *t)
 	 */
 	if ((sk->sk_state == TCP_FIN_WAIT2 || sk->sk_state == TCP_CLOSING) &&
 	    sock_flag(sk, SOCK_DEAD)) {
-		DEBUG("%s: finish wait, close sock\n", __func__);
+		DEBUG("%s: finish wait, close sock", __func__);
 		goto death;
 	}