diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index d21d4ba2eb391c11af779cdb7a7dc4dadaa3f7d7..984f4772a01e2e674babcf3bdffe53e5569d5b67 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -36,7 +36,7 @@ struct bpf_ringbuf {
 	u64 mask;
 	struct page **pages;
 	int nr_pages;
-	spinlock_t spinlock ____cacheline_aligned_in_smp;
+	raw_spinlock_t spinlock ____cacheline_aligned_in_smp;
 	/* Consumer and producer counters are put into separate pages to allow
 	 * mapping consumer page as r/w, but restrict producer page to r/o.
 	 * This protects producer position from being modified by user-space
@@ -139,7 +139,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
 	if (!rb)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&rb->spinlock);
+	raw_spin_lock_init(&rb->spinlock);
 	init_waitqueue_head(&rb->waitq);
 	init_irq_work(&rb->work, bpf_ringbuf_notify);
 
@@ -339,10 +339,10 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	cons_pos = smp_load_acquire(&rb->consumer_pos);
 
 	if (in_nmi()) {
-		if (!spin_trylock_irqsave(&rb->spinlock, flags))
+		if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
 			return NULL;
 	} else {
-		spin_lock_irqsave(&rb->spinlock, flags);
+		raw_spin_lock_irqsave(&rb->spinlock, flags);
 	}
 
 	pend_pos = rb->pending_pos;
@@ -368,7 +368,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	 */
 	if (new_prod_pos - cons_pos > rb->mask ||
 	    new_prod_pos - pend_pos > rb->mask) {
-		spin_unlock_irqrestore(&rb->spinlock, flags);
+		raw_spin_unlock_irqrestore(&rb->spinlock, flags);
 		return NULL;
 	}
 
@@ -380,7 +380,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	/* pairs with consumer's smp_load_acquire() */
 	smp_store_release(&rb->producer_pos, new_prod_pos);
 
-	spin_unlock_irqrestore(&rb->spinlock, flags);
+	raw_spin_unlock_irqrestore(&rb->spinlock, flags);
 
 	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
 }
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 073abbe3866b48fb57aa5e86863240278c362942..1893fe5460acb169c32f26d6a2728d873e269660 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -256,7 +256,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
 	if (len == 0) {
 		trace_probe_log_err(offset, NO_EVENT_NAME);
 		return -EINVAL;
-	} else if (len > MAX_EVENT_NAME_LEN) {
+	} else if (len >= MAX_EVENT_NAME_LEN) {
 		trace_probe_log_err(offset, EVENT_TOO_LONG);
 		return -EINVAL;
 	}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 0f7c7506e5befd0c2a6f37928b3c35f95b6f6a77..12610c22ae55336ad26d63af394b86277f41f5e7 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -723,21 +723,31 @@ static bool reqsk_queue_unlink(struct request_sock *req)
 		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
 		spin_unlock(lock);
 	}
-	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
-		reqsk_put(req);
+
 	return found;
 }
 
-bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
+					struct request_sock *req,
+					bool from_timer)
 {
 	bool unlinked = reqsk_queue_unlink(req);
 
+	if (!from_timer && del_timer_sync(&req->rsk_timer))
+		reqsk_put(req);
+
 	if (unlinked) {
 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 		reqsk_put(req);
 	}
+
 	return unlinked;
 }
+
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+{
+	return __inet_csk_reqsk_queue_drop(sk, req, false);
+}
 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 
 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
@@ -804,7 +814,8 @@ static void reqsk_timer_handler(struct timer_list *t)
 		return;
 	}
 drop:
-	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
+	__inet_csk_reqsk_queue_drop(sk_listener, req, true);
+	reqsk_put(req);
 }
 
 static void reqsk_queue_hash_req(struct request_sock *req,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 8391e8c23e506fc9c2dcd87c8c56e4846663cf00..460f24b028f5d735b805fcf55cd29db086d74abf 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -81,6 +81,8 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 			}
 			skb = nskb;
 		}
+		/* Make sure idev stays alive */
+		rcu_read_lock();
 		if (skb &&
 		    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
 			kfree_skb(skb);
@@ -88,8 +90,10 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 		}
 		if (!skb) {
 			IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+			rcu_read_unlock();
 			return -ENOMEM;
 		}
+		rcu_read_unlock();
 	}
 
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
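
Note on the kernel/bpf/ringbuf.c hunks: __bpf_ringbuf_reserve() runs in
tracing and even NMI context, and on PREEMPT_RT a spinlock_t is a sleeping
lock, so the per-buffer lock has to be a raw_spinlock_t. A minimal sketch of
the reserve-side locking pattern, abbreviated from the patched function and
not the full implementation:

	unsigned long flags;

	if (in_nmi()) {
		/* Never spin in NMI: the interrupted context on this CPU
		 * may already hold the lock, so try once and bail out.
		 */
		if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
			return NULL;
	} else {
		raw_spin_lock_irqsave(&rb->spinlock, flags);
	}
	/* ... advance producer_pos and write the record header ... */
	raw_spin_unlock_irqrestore(&rb->spinlock, flags);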
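
Note on the kernel/trace/trace_probe.c hunk: this is an off-by-one fix. The
event name is later copied into a buffer of MAX_EVENT_NAME_LEN bytes that
must also hold the terminating NUL, so a name of exactly MAX_EVENT_NAME_LEN
characters is one byte too long. Illustrative sketch only, assuming the
usual fixed-size destination buffer:

	char buf[MAX_EVENT_NAME_LEN];	/* must hold the name plus '\0' */

	if (len >= MAX_EVENT_NAME_LEN)	/* len == MAX_EVENT_NAME_LEN leaves */
		return -EINVAL;		/* no room for the terminator */
	strscpy(buf, event, len + 1);	/* now guaranteed to fit */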
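
Note on the net/ipv4/inet_connection_sock.c hunks: del_timer_sync() waits
for a running timer callback to finish, so reqsk_timer_handler() must not
call it on its own rsk_timer. The new from_timer flag encodes which caller
we are: the ordinary drop path still cancels the timer synchronously, while
the timer path skips the cancel and only drops the reference the timer
itself holds. Hedged sketch of the timer path (names mirror the patch; the
real handler does more work before reaching the drop label):

	static void reqsk_timer_handler(struct timer_list *t)
	{
		struct request_sock *req = from_timer(req, t, rsk_timer);

		/* del_timer_sync(&req->rsk_timer) here would wait on
		 * ourselves; pass from_timer == true to skip it instead.
		 */
		__inet_csk_reqsk_queue_drop(req->rsk_listener, req, true);
		reqsk_put(req);	/* reference that kept req alive for us */
	}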
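
Note on the net/ipv6/ip6_output.c hunks: the inet6_dev returned by
ip6_dst_idev() is freed via RCU, so the headroom-expansion slow path that
may drop the skb and bump IPSTATS_MIB_OUTDISCARDS has to hold the RCU read
lock across the dereference, or it can race with netdevice teardown and
touch freed memory. The pattern in isolation:

	rcu_read_lock();	/* keeps the idev from being freed under us */
	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
	rcu_read_unlock();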