diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 96f80c8f8e30484c2c1b656d91d12a0e82eeee9f..611b27ad7e3df2175f559d18ce7068bac9176734 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -49,6 +49,7 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
 
 	ctx->curve_id = params.curve_id;
 	ctx->ndigits = ndigits;
+	memset(ctx->private_key, 0, sizeof(ctx->private_key));
 
 	if (!params.key || !params.key_size)
 		return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
diff --git a/fs/proc/array.c b/fs/proc/array.c
index e0eb4c74825b47743e4c5d964d61d344049fd75a..a9635929e7162cafe2eeb2188b9f8a94ba357251 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -534,10 +534,12 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		} while (need_seqretry(&sig->stats_lock, seq));
 		done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
 
-	if (!whole) {
+	if (whole) {
+		thread_group_cputime_adjusted(task, &utime, &stime);
+	} else {
+		task_cputime_adjusted(task, &utime, &stime);
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
-		task_cputime_adjusted(task, &utime, &stime);
 		gtime = task_gtime(task);
 	}
 
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index de21a45a4ee7d4f99332a213359c81b7cac51227..99d45576a596d9a1cb6939664ddac49ea1fedc47 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -56,8 +56,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_post_fork(struct task_struct *p,
-			    struct kernel_clone_args *kargs);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
 
 void __noreturn do_task_dead(void);
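
Note (not part of the patch): the do_task_stat() change above only alters which helper fills utime/stime for /proc/<pid>/stat; the reported values remain fields 14 and 15, in clock ticks. Below is a minimal userspace sketch for reading them, assuming only a Linux /proc and a C compiler; the parsing approach and buffer size are illustrative choices, not taken from the patch.

/*
 * Sketch: read utime/stime (fields 14/15 of /proc/self/stat, in clock
 * ticks). The comm field may contain spaces, so skip past the last ')'.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	FILE *f = fopen("/proc/self/stat", "r");
	unsigned long utime, stime;
	char *p;

	if (!f || !fgets(buf, sizeof(buf), f))
		return 1;
	fclose(f);

	p = strrchr(buf, ')');	/* end of the comm field */
	if (!p)
		return 1;
	/* skip state (3) and fields 4..13, then read utime (14) and stime (15) */
	if (sscanf(p + 2, "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %lu %lu",
		   &utime, &stime) != 2)
		return 1;

	printf("utime=%lu stime=%lu (ticks, %ld per second)\n",
	       utime, stime, sysconf(_SC_CLK_TCK));
	return 0;
}
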
diff --git a/kernel/fork.c b/kernel/fork.c
index 70ca6cca1eb65f975be2d3bd72090926ebce4f33..a3a17257df3c5eb1ed5177d01efc0fe61d7d8c35 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2247,6 +2247,17 @@ static __latent_entropy struct task_struct *copy_process(
 	if (retval)
 		goto bad_fork_put_pidfd;
 
+	/*
+	 * Now that the cgroups are pinned, re-clone the parent cgroup and put
+	 * the new task on the correct runqueue. All this *before* the task
+	 * becomes visible.
+	 *
+	 * This isn't part of ->can_fork() because while the re-cloning is
+	 * cgroup specific, it unconditionally needs to place the task on a
+	 * runqueue.
+	 */
+	sched_cgroup_fork(p, args);
+
 	/*
 	 * From this point on we must avoid any synchronous user-space
 	 * communication until we take the tasklist-lock. In particular, we do
@@ -2356,7 +2367,7 @@ static __latent_entropy struct task_struct *copy_process(
 		fd_install(pidfd, pidfile);
 
 	proc_fork_connector(p);
-	sched_post_fork(p, args);
+	sched_post_fork(p);
 	cgroup_post_fork(p, args);
 	perf_event_fork(p);
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 576e7a5767f0c7fc5e12a167829f1f4b69d41ec7..49ed67b62e9f21e2abc22e9647559e00a8ab1827 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -852,9 +852,8 @@ int tg_nop(struct task_group *tg, void *data)
 }
 #endif
 
-static void set_load_weight(struct task_struct *p)
+static void set_load_weight(struct task_struct *p, bool update_load)
 {
-	bool update_load = !(READ_ONCE(p->state) & TASK_NEW);
 	int prio = p->static_prio - MAX_RT_PRIO;
 	struct load_weight *load = &p->se.load;
 
@@ -3436,7 +3435,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 		p->static_prio = NICE_TO_PRIO(0);
 
 	p->prio = p->normal_prio = p->static_prio;
-	set_load_weight(p);
+	set_load_weight(p, false);
 
 #ifdef CONFIG_SCHED_LATENCY_NICE
 	p->latency_prio = NICE_TO_LATENCY(0);
@@ -3459,6 +3458,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 
 	init_entity_runnable_average(&p->se);
 
+
 #ifdef CONFIG_SCHED_INFO
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -3474,18 +3474,23 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	return 0;
 }
 
-void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 {
 	unsigned long flags;
-#ifdef CONFIG_CGROUP_SCHED
-	struct task_group *tg;
-#endif
 
+	/*
+	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+	 * required yet, but lockdep gets upset if rules are violated.
+	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 #ifdef CONFIG_CGROUP_SCHED
-	tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
-			  struct task_group, css);
-	p->sched_task_group = autogroup_task_group(p, tg);
+	if (1) {
+		struct task_group *tg;
+		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
+				  struct task_group, css);
+		tg = autogroup_task_group(p, tg);
+		p->sched_task_group = tg;
+	}
 #endif
 	rseq_migrate(p);
 	/*
@@ -3496,7 +3501,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
 
+void sched_post_fork(struct task_struct *p)
+{
 	uclamp_post_fork(p);
 }
 
@@ -5214,7 +5222,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		put_prev_task(rq, p);
 
 	p->static_prio = NICE_TO_PRIO(nice);
-	set_load_weight(p);
+	set_load_weight(p, true);
 	old_prio = p->prio;
 	p->prio = effective_prio(p);
 
@@ -5387,7 +5395,7 @@ static void __setscheduler_params(struct task_struct *p,
 	 */
 	p->rt_priority = attr->sched_priority;
 	p->normal_prio = normal_prio(p);
-	set_load_weight(p);
+	set_load_weight(p, true);
 }
 
 /*
@@ -7792,7 +7800,7 @@ void __init sched_init(void)
 	}
 
 	BUG_ON(alloc_related_thread_groups());
-	set_load_weight(&init_task);
+	set_load_weight(&init_task, false);
 
 	/*
	 * The boot idle thread does lazy MMU switching as well:
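
Note (not part of the patch, illustrative only): with the hunks above, sched_cgroup_fork() settles the child's cgroup and runqueue placement before the task becomes visible, and sched_post_fork() shrinks to the uclamp step. The sketch below only observes the user-visible side of that invariant from userspace; it is not a reproducer for the race being fixed, and the procfs path is the standard one.

/*
 * Sketch: fork a child and immediately read its cgroup membership from
 * /proc/<pid>/cgroup. By the time the child's pid is visible here, the
 * kernel has already placed the task in its cgroup/runqueue.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	char path[64], line[256];
	FILE *f;

	if (pid < 0)
		return 1;
	if (pid == 0) {		/* child: linger briefly, then exit */
		sleep(1);
		_exit(0);
	}

	snprintf(path, sizeof(path), "/proc/%d/cgroup", (int)pid);
	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			printf("child %d: %s", (int)pid, line);
		fclose(f);
	}
	waitpid(pid, NULL, 0);
	return 0;
}
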
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2806b9ed6387940a429345ff8458b8971931c4bf..d6dbb042489f7bf3d38d35e7132b6368886d8fde 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1864,7 +1864,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
 	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
 	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
-	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
+	[IFLA_GSO_MAX_SIZE]	= NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
 	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a99c374101fc5120ab9583eff4da82448a404940..b47dfd6001fa941dad45de4f74c214cc4a6d40f8 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1571,7 +1571,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 		 * so icmphdr does not in skb linear region and can not get icmp_type
 		 * by icmp_hdr(skb)->type.
 		 */
-		if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
+		if (sk->sk_type == SOCK_RAW &&
+		    !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
 			icmp_type = fl4->fl4_icmp_type;
 		else
 			icmp_type = icmp_hdr(skb)->type;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 650da4d8f7ad12bca1c2b1a6f51ae2e24571d927..5a8d5ebe6590f285d3c0d66467116a9ffec1d43d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -634,6 +634,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 			   (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
 			   daddr, saddr, 0, 0, sk->sk_uid);
 
+	fl4.fl4_icmp_type = 0;
+	fl4.fl4_icmp_code = 0;
+
 	if (!hdrincl) {
 		rfv.msg = msg;
 		rfv.hlen = 0;
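
Note (not part of the patch): the raw.c and ip_output.c hunks concern the non-IP_HDRINCL raw-socket send path, where __ip_make_skb() may consult fl4->fl4_icmp_type. Below is a minimal userspace sketch of that path, sending one ICMP echo request over SOCK_RAW; it requires root or CAP_NET_RAW, and the destination address and checksum helper are illustrative.

/*
 * Sketch: send one ICMP echo request through a raw socket *without*
 * IP_HDRINCL, i.e. the raw_sendmsg()/__ip_make_skb() path touched above.
 */
#include <arpa/inet.h>
#include <linux/icmp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* RFC 1071 one's-complement checksum over the ICMP header */
static unsigned short csum16(const void *data, int len)
{
	const unsigned short *p = data;
	unsigned long sum = 0;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	if (len)
		sum += *(const unsigned char *)p;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
	struct sockaddr_in dst = { .sin_family = AF_INET };
	struct icmphdr icmp = { .type = ICMP_ECHO };	/* code, checksum start at 0 */

	if (fd < 0) {
		perror("socket");	/* needs root or CAP_NET_RAW */
		return 1;
	}
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	icmp.un.echo.id = htons((unsigned short)getpid());
	icmp.un.echo.sequence = htons(1);
	icmp.checksum = csum16(&icmp, sizeof(icmp));

	if (sendto(fd, &icmp, sizeof(icmp), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");
	close(fd);
	return 0;
}
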