From 12c3de2bede588cf31503d390a331e7912151a39 Mon Sep 17 00:00:00 2001 From: "yang.yang29@zte.com.cn" Date: Mon, 12 Jun 2023 09:48:49 +0800 Subject: [PATCH 1/9] sched: Fix migration_cpu_stop() requeueing commit b44753fd375b6b34553a9e775a3edaef3b899962 upstream. commit 8a6edb5257e2a84720fe78cb179eca58ba76126f upstream. When affine_move_task(p) is called on a running task @p, which is not otherwise already changing affinity, we'll first set p->migration_pending and then do: stop_one_cpu(cpu_of_rq(rq), migration_cpu_stop, &arg); This then gets us to migration_cpu_stop() running on the CPU that was previously running our victim task @p. If we find that our task is no longer on that runqueue (this can happen because of a concurrent migration due to load-balance etc.), then we'll end up at the: } else if (dest_cpu < 0 || pending) { branch. Which we'll take because we set pending earlier. Here we first check if the task @p has already satisfied the affinity constraints, if so we bail early [A]. Otherwise we'll reissue migration_cpu_stop() onto the CPU that is now hosting our task @p: stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, &pending->arg, &pending->stop_work); Except, we've never initialized pending->arg, which will be all 0s. This then results in running migration_cpu_stop() on the next CPU with arg->p == NULL, which gives the by now obvious result of fireworks. The cure is to change affine_move_task() to always use pending->arg, furthermore we can use the exact same pattern as the SCA_MIGRATE_ENABLE case, since we'll block on the pending->done completion anyway, no point in adding yet another completion in stop_one_cpu(). This then gives a clear distinction between the two migration_cpu_stop() use cases: - sched_exec() / migrate_task_to() : arg->pending == NULL - affine_move_task() : arg->pending != NULL; And we can have it ignore p->migration_pending when !arg->pending. Any stop work from sched_exec() / migrate_task_to() is in addition to stop works from affine_move_task(), which will be sufficient to issue the completion. Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()") Cc: stable@kernel.org Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Reviewed-by: Valentin Schneider Link: https://lkml.kernel.org/r/20210224131355.357743989@infradead.org Signed-off-by: Paul Gortmaker Signed-off-by: Steven Rostedt (VMware) --- kernel/sched/core.c | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4db0cfa24952..65e3c82ce93a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1986,6 +1986,24 @@ static int migration_cpu_stop(void *data) rq_lock(rq, &rf); pending = p->migration_pending; + if (pending && !arg->pending) { + /* + * This happens from sched_exec() and migrate_task_to(), + * neither of them care about pending and just want a task to + * maybe move about. + * + * Even if there is a pending, we can ignore it, since + * affine_move_task() will have it's own stop_work's in flight + * which will manage the completion. + * + * Notably, pending doesn't need to match arg->pending. This can + * happen when tripple concurrent affine_move_task() first sets + * pending, then clears pending and eventually sets another + * pending.
+ */ + pending = NULL; + } + /* * If task_rq(p) != rq, it cannot be migrated here, because we're * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because @@ -2258,10 +2276,6 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag int dest_cpu, unsigned int flags) { struct set_affinity_pending my_pending = { }, *pending = NULL; - struct migration_arg arg = { - .task = p, - .dest_cpu = dest_cpu, - }; bool complete = false; /* Can the task run on the task's current CPU? If so, we're done */ @@ -2299,6 +2313,12 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag /* Install the request */ refcount_set(&my_pending.refs, 1); init_completion(&my_pending.done); + my_pending.arg = (struct migration_arg) { + .task = p, + .dest_cpu = -1, /* any */ + .pending = &my_pending, + }; + p->migration_pending = &my_pending; } else { pending = p->migration_pending; @@ -2329,12 +2349,6 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag p->migration_flags &= ~MDF_PUSH; task_rq_unlock(rq, p, rf); - pending->arg = (struct migration_arg) { - .task = p, - .dest_cpu = -1, - .pending = pending, - }; - stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, &pending->arg, &pending->stop_work); @@ -2347,8 +2361,11 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag * is_migration_disabled(p) checks to the stopper, which will * run on the same CPU as said p. */ + refcount_inc(&pending->refs); /* pending->{arg,stop_work} */ task_rq_unlock(rq, p, rf); - stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); + + stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, + &pending->arg, &pending->stop_work); } else { -- Gitee From fc9a12132e0d57032c2187b99308ef42d3021a0e Mon Sep 17 00:00:00 2001 From: "yang.yang29@zte.com.cn" Date: Mon, 12 Jun 2023 09:49:01 +0800 Subject: [PATCH 2/9] sched: Simplify migration_cpu_stop() commit 5fcc21b7699ae42c0222029ad3adac769350fe91 upstream. commit c20cf065d4a619d394d23290093b1002e27dff86 upstream. When affine_move_task() issues a migration_cpu_stop(), the purpose of that function is to complete that @pending, not any random other p->migration_pending that might have gotten installed since. This realization much simplifies migration_cpu_stop() and allows further necessary steps to fix all this as it provides the guarantee that @pending's stopper will complete @pending (and not some random other @pending). 
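The compare-before-clear rule this change establishes can be sketched in a few lines of plain C. The sketch below is a user-space model only, with invented names (struct request, slot, stopper_fn); it is not the kernel code, it merely mirrors migration_cpu_stop() clearing p->migration_pending only when it still points at the @pending the stopper was queued for:

#include <stdio.h>

struct request {
	const char *name;
	int done;
};

static struct request *slot;	/* models p->migration_pending */

/* models migration_cpu_stop(): arg identifies the request it was queued for */
static void stopper_fn(struct request *arg)
{
	if (slot == arg)	/* only clear the slot if it is still ours */
		slot = NULL;
	arg->done = 1;		/* but always complete the request we carry */
}

int main(void)
{
	struct request a = { "A", 0 }, b = { "B", 0 };

	slot = &a;		/* first caller installs A */
	slot = &b;		/* a later caller replaces it with B */

	stopper_fn(&a);		/* the stale stopper queued for A finally runs */
	printf("A done=%d, B still pending=%d\n", a.done, slot == &b);
	return 0;
}

A stale stopper thus completes the request it was dispatched for without wiping out a newer one that another caller installed in the meantime.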
Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()") Cc: stable@kernel.org Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Reviewed-by: Valentin Schneider Link: https://lkml.kernel.org/r/20210224131355.430014682@infradead.org Signed-off-by: Paul Gortmaker Signed-off-by: Steven Rostedt (VMware) --- kernel/sched/core.c | 56 +++++++-------------------------------------- 1 file changed, 8 insertions(+), 48 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 65e3c82ce93a..59567ac62895 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1962,8 +1962,8 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, */ static int migration_cpu_stop(void *data) { - struct set_affinity_pending *pending; struct migration_arg *arg = data; + struct set_affinity_pending *pending = arg->pending; struct task_struct *p = arg->task; int dest_cpu = arg->dest_cpu; struct rq *rq = this_rq(); @@ -1985,25 +1985,6 @@ static int migration_cpu_stop(void *data) raw_spin_lock(&p->pi_lock); rq_lock(rq, &rf); - pending = p->migration_pending; - if (pending && !arg->pending) { - /* - * This happens from sched_exec() and migrate_task_to(), - * neither of them care about pending and just want a task to - * maybe move about. - * - * Even if there is a pending, we can ignore it, since - * affine_move_task() will have it's own stop_work's in flight - * which will manage the completion. - * - * Notably, pending doesn't need to match arg->pending. This can - * happen when tripple concurrent affine_move_task() first sets - * pending, then clears pending and eventually sets another - * pending. - */ - pending = NULL; - } - /* * If task_rq(p) != rq, it cannot be migrated here, because we're * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because @@ -2014,31 +1995,20 @@ static int migration_cpu_stop(void *data) goto out; if (pending) { - p->migration_pending = NULL; + if (p->migration_pending == pending) + p->migration_pending = NULL; complete = true; } - /* migrate_enable() -- we must not race against SCA */ - if (dest_cpu < 0) { - /* - * When this was migrate_enable() but we no longer - * have a @pending, a concurrent SCA 'fixed' things - * and we should be valid again. Nothing to do. - */ - if (!pending) { - WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)); - goto out; - } - + if (dest_cpu < 0) dest_cpu = cpumask_any_distribute(&p->cpus_mask); - } if (task_on_rq_queued(p)) rq = __migrate_task(rq, &rf, p, dest_cpu); else p->wake_cpu = dest_cpu; - } else if (dest_cpu < 0 || pending) { + } else if (pending) { /* * This happens when we get migrated between migrate_enable()'s * preempt_enable() and scheduling the stopper task. At that @@ -2053,22 +2023,13 @@ static int migration_cpu_stop(void *data) * ->pi_lock, so the allowed mask is stable - if it got * somewhere allowed, we're done. */ - if (pending && cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { - p->migration_pending = NULL; + if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { + if (p->migration_pending == pending) + p->migration_pending = NULL; complete = true; goto out; } - /* - * When this was migrate_enable() but we no longer have an - * @pending, a concurrent SCA 'fixed' things and we should be - * valid again. Nothing to do. 
- */ - if (!pending) { - WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)); - goto out; - } - /* * When migrate_enable() hits a rq mis-match we can't reliably * determine is_migration_disabled() and so have to chase after @@ -2086,7 +2047,6 @@ static int migration_cpu_stop(void *data) complete_all(&pending->done); /* For pending->{arg,stop_work} */ - pending = arg->pending; if (pending && refcount_dec_and_test(&pending->refs)) wake_up_var(&pending->refs); -- Gitee From cedf708c2b1b1ce36588dcbf08308d7a56ea89f4 Mon Sep 17 00:00:00 2001 From: "yang.yang29@zte.com.cn" Date: Mon, 12 Jun 2023 09:49:11 +0800 Subject: [PATCH 3/9] sched: Collate affine_move_task() stoppers commit e225c5538057c22e9117c5bda1ed79811230f9f9 upstream. commit 58b1a45086b5f80f2b2842aa7ed0da51a64a302b upstream. The SCA_MIGRATE_ENABLE and task_running() cases are almost identical, collapse them to avoid further duplication. Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()") Cc: stable@kernel.org Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Reviewed-by: Valentin Schneider Link: https://lkml.kernel.org/r/20210224131355.500108964@infradead.org Signed-off-by: Paul Gortmaker Signed-off-by: Steven Rostedt (VMware) --- kernel/sched/core.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 59567ac62895..689e2aadfaa6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2303,30 +2303,23 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag return -EINVAL; } - if (flags & SCA_MIGRATE_ENABLE) { - - refcount_inc(&pending->refs); /* pending->{arg,stop_work} */ - p->migration_flags &= ~MDF_PUSH; - task_rq_unlock(rq, p, rf); - - stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, - &pending->arg, &pending->stop_work); - - return 0; - } - if (task_running(rq, p) || p->state == TASK_WAKING) { /* - * Lessen races (and headaches) by delegating - * is_migration_disabled(p) checks to the stopper, which will - * run on the same CPU as said p. + * MIGRATE_ENABLE gets here because 'p == current', but for + * anything else we cannot do is_migration_disabled(), punt + * and have the stopper function handle it all race-free. */ + refcount_inc(&pending->refs); /* pending->{arg,stop_work} */ + if (flags & SCA_MIGRATE_ENABLE) + p->migration_flags &= ~MDF_PUSH; task_rq_unlock(rq, p, rf); stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, &pending->arg, &pending->stop_work); + if (flags & SCA_MIGRATE_ENABLE) + return 0; } else { if (!is_migration_disabled(p)) { -- Gitee From 7b2bfdcbd7ee26a19475b6429f93711cb5ea498c Mon Sep 17 00:00:00 2001 From: "yang.yang29@zte.com.cn" Date: Mon, 12 Jun 2023 09:49:22 +0800 Subject: [PATCH 4/9] sched: Optimize migration_cpu_stop() commit 4fc685474465461a03ea661ae37d428a36398276 upstream. commit 3f1bc119cd7fc987c8ed25ffb717f99403bb308c upstream. When the purpose of migration_cpu_stop() is to migrate the task to 'any' valid CPU, don't migrate the task when it's already running on a valid CPU. 
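The early exit this patch adds boils down to a single mask test. The following is a minimal user-space sketch, not the kernel code: needs_migration() and the bitmask arguments are invented stand-ins for the dest_cpu convention, task_cpu(p) and p->cpus_mask:

#include <stdbool.h>
#include <stdio.h>

static bool needs_migration(int cur_cpu, unsigned long allowed_mask, int dest_cpu)
{
	if (dest_cpu < 0) {				/* "move to any allowed CPU" request */
		if (allowed_mask & (1UL << cur_cpu))
			return false;			/* already on an allowed CPU: nothing to do */
	}
	return true;					/* otherwise enforce a destination */
}

int main(void)
{
	/* task on CPU 2, allowed on CPUs {1,2,3}, "any" destination: no move needed */
	printf("migrate? %d\n", needs_migration(2, 0x0eUL, -1));
	/* task on CPU 2, allowed only on CPUs {4,5}: a move is required */
	printf("migrate? %d\n", needs_migration(2, 0x30UL, -1));
	return 0;
}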
Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()") Cc: stable@kernel.org Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Reviewed-by: Valentin Schneider Link: https://lkml.kernel.org/r/20210224131355.569238629@infradead.org Signed-off-by: Paul Gortmaker Signed-off-by: Steven Rostedt (VMware) --- kernel/sched/core.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 689e2aadfaa6..25c8fe81639b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2000,14 +2000,25 @@ static int migration_cpu_stop(void *data) complete = true; } - if (dest_cpu < 0) + if (dest_cpu < 0) { + if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) + goto out; + dest_cpu = cpumask_any_distribute(&p->cpus_mask); + } if (task_on_rq_queued(p)) rq = __migrate_task(rq, &rf, p, dest_cpu); else p->wake_cpu = dest_cpu; + /* + * XXX __migrate_task() can fail, at which point we might end + * up running on a dodgy CPU, AFAICT this can only happen + * during CPU hotplug, at which point we'll get pushed out + * anyway, so it's probably not a big deal. + */ + } else if (pending) { /* * This happens when we get migrated between migrate_enable()'s -- Gitee From 89ea38c5281c69794468ba537b50eed2e2f23da0 Mon Sep 17 00:00:00 2001 From: "yang.yang29@zte.com.cn" Date: Mon, 12 Jun 2023 09:49:32 +0800 Subject: [PATCH 5/9] sched: Fix affine_move_task() self-concurrency commit 28901a61850429a30853d339f6d15e9edec05589 upstream. commit 9e81889c7648d48dd5fe13f41cbc99f3c362484a upstream. Consider: sched_setaffinity(p, X); sched_setaffinity(p, Y); Then the first will install p->migration_pending = &my_pending; and issue stop_one_cpu_nowait(pending); and the second one will read p->migration_pending and _also_ issue: stop_one_cpu_nowait(pending), the _SAME_ @pending. This causes stopper list corruption. Add set_affinity_pending::stop_pending, to indicate if a stopper is in progress. Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()") Cc: stable@kernel.org Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Reviewed-by: Valentin Schneider Link: https://lkml.kernel.org/r/20210224131355.649146419@infradead.org Signed-off-by: Paul Gortmaker Signed-off-by: Steven Rostedt (VMware) --- kernel/sched/core.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 25c8fe81639b..ec3d0b1255ce 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1928,6 +1928,7 @@ struct migration_arg { struct set_affinity_pending { refcount_t refs; + unsigned int stop_pending; struct completion done; struct cpu_stop_work stop_work; struct migration_arg arg; @@ -2046,12 +2047,15 @@ static int migration_cpu_stop(void *data) * determine is_migration_disabled() and so have to chase after * it. */ + WARN_ON_ONCE(!pending->stop_pending); task_rq_unlock(rq, p, &rf); stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, &pending->arg, &pending->stop_work); return 0; } out: + if (pending) + pending->stop_pending = false; task_rq_unlock(rq, p, &rf); if (complete) @@ -2247,7 +2251,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag int dest_cpu, unsigned int flags) { struct set_affinity_pending my_pending = { }, *pending = NULL; - bool complete = false; + bool stop_pending, complete = false; /* Can the task run on the task's current CPU? 
If so, we're done */ if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { @@ -2320,14 +2324,19 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag * anything else we cannot do is_migration_disabled(), punt * and have the stopper function handle it all race-free. */ + stop_pending = pending->stop_pending; + if (!stop_pending) + pending->stop_pending = true; refcount_inc(&pending->refs); /* pending->{arg,stop_work} */ if (flags & SCA_MIGRATE_ENABLE) p->migration_flags &= ~MDF_PUSH; task_rq_unlock(rq, p, rf); - stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, - &pending->arg, &pending->stop_work); + if (!stop_pending) { + stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, + &pending->arg, &pending->stop_work); + } if (flags & SCA_MIGRATE_ENABLE) return 0; -- Gitee From 3dd9f6f64a55d530d114ee148968f26e5d7f3d43 Mon Sep 17 00:00:00 2001 From: "yang.yang29@zte.com.cn" Date: Mon, 12 Jun 2023 09:49:43 +0800 Subject: [PATCH 6/9] sched: Simplify set_affinity_pending refcounts commit 5e2fc92da21f41a619ca7d96834a3d4a831fefd5 upstream. commit 50caf9c14b1498c90cf808dbba2ca29bd32ccba4 upstream. Now that we have set_affinity_pending::stop_pending to indicate if a stopper is in progress, and we have the guarantee that if that stopper exists, it will (eventually) complete our @pending we can simplify the refcount scheme by no longer counting the stopper thread. Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()") Cc: stable@kernel.org Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Reviewed-by: Valentin Schneider Link: https://lkml.kernel.org/r/20210224131355.724130207@infradead.org Signed-off-by: Paul Gortmaker Signed-off-by: Steven Rostedt (VMware) --- kernel/sched/core.c | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ec3d0b1255ce..03a328915b6b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1926,6 +1926,10 @@ struct migration_arg { struct set_affinity_pending *pending; }; +/* + * @refs: number of wait_for_completion() + * @stop_pending: is @stop_work in use + */ struct set_affinity_pending { refcount_t refs; unsigned int stop_pending; @@ -2061,10 +2065,6 @@ static int migration_cpu_stop(void *data) if (complete) complete_all(&pending->done); - /* For pending->{arg,stop_work} */ - if (pending && refcount_dec_and_test(&pending->refs)) - wake_up_var(&pending->refs); - return 0; } @@ -2263,12 +2263,16 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag push_task = get_task_struct(p); } + /* + * If there are pending waiters, but no pending stop_work, + * then complete now. 
+ */ pending = p->migration_pending; - if (pending) { - refcount_inc(&pending->refs); + if (pending && !pending->stop_pending) { p->migration_pending = NULL; complete = true; } + task_rq_unlock(rq, p, rf); if (push_task) { @@ -2277,7 +2281,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag } if (complete) - goto do_complete; + complete_all(&pending->done); return 0; } @@ -2328,9 +2332,9 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag if (!stop_pending) pending->stop_pending = true; - refcount_inc(&pending->refs); /* pending->{arg,stop_work} */ if (flags & SCA_MIGRATE_ENABLE) p->migration_flags &= ~MDF_PUSH; + task_rq_unlock(rq, p, rf); if (!stop_pending) { @@ -2346,12 +2350,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag if (task_on_rq_queued(p)) rq = move_queued_task(rq, rf, p, dest_cpu); - p->migration_pending = NULL; - complete = true; + if (!pending->stop_pending) { + p->migration_pending = NULL; + complete = true; + } } task_rq_unlock(rq, p, rf); -do_complete: if (complete) complete_all(&pending->done); } @@ -2359,7 +2364,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag wait_for_completion(&pending->done); if (refcount_dec_and_test(&pending->refs)) - wake_up_var(&pending->refs); + wake_up_var(&pending->refs); /* No UaF, just an address */ /* * Block the original owner of &pending until all subsequent callers @@ -2367,6 +2372,9 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag */ wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); + /* ARGH */ + WARN_ON_ONCE(my_pending.stop_pending); + return 0; } -- Gitee From 8e8108dd0d462b0781ac7d9b525499243adddc08 Mon Sep 17 00:00:00 2001 From: "yang.yang29@zte.com.cn" Date: Mon, 12 Jun 2023 09:50:01 +0800 Subject: [PATCH 7/9] sched: Don't defer CPU pick to migration_cpu_stop() commit de8cf5c8b5e4fd8279c4475b4299cd8b55a64828 upstream. commit 475ea6c60279e9f2ddf7e4cf2648cd8ae0608361 upstream. Will reported that the 'XXX __migrate_task() can fail' in migration_cpu_stop() can happen, and it *is* sort of a big deal. Looking at it some more, one will note there is a glaring hole in the deferred CPU selection: (w/ CONFIG_CPUSET=n, so that the affinity mask passed via taskset doesn't get AND'd with cpu_online_mask) $ taskset -pc 0-2 $PID # offline CPUs 3-4 $ taskset -pc 3-5 $PID `\ $PID may stay on 0-2 due to the cpumask_any_distribute() picking an offline CPU and __migrate_task() refusing to do anything due to cpu_is_allowed(). set_cpus_allowed_ptr() goes to some length to pick a dest_cpu that matches the right constraints vs affinity and the online/active state of the CPUs. Reuse that instead of discarding it in the affine_move_task() case. 
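The hole can be modelled with two bitmasks in plain C. The pick_first() helper and the masks below are invented for illustration, standing in for cpumask_any_distribute() and the affinity/online cpumasks; this is not the kernel implementation:

#include <stdio.h>

static int pick_first(unsigned long mask)
{
	for (int cpu = 0; cpu < 64; cpu++)
		if (mask & (1UL << cpu))
			return cpu;
	return -1;					/* nothing usable */
}

int main(void)
{
	unsigned long online   = 0x27UL;	/* CPUs 0-2 and 5 online; 3-4 offline */
	unsigned long affinity = 0x38UL;	/* userspace asked for CPUs 3-5 */

	int naive = pick_first(affinity);		/* 3: offline, the migration would be refused */
	int valid = pick_first(affinity & online);	/* 5: a destination that can actually be used */

	printf("naive pick: %d, constrained pick: %d\n", naive, valid);
	return 0;
}

Reusing the dest_cpu already validated by set_cpus_allowed_ptr(), as this patch does, amounts to always taking the constrained pick instead of the naive one.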
Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()") Reported-by: Will Deacon Signed-off-by: Valentin Schneider Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20210526205751.842360-2-valentin.schneider@arm.com Signed-off-by: Paul Gortmaker Signed-off-by: Steven Rostedt (VMware) --- kernel/sched/core.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 03a328915b6b..1ed21442efa9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1970,7 +1970,6 @@ static int migration_cpu_stop(void *data) struct migration_arg *arg = data; struct set_affinity_pending *pending = arg->pending; struct task_struct *p = arg->task; - int dest_cpu = arg->dest_cpu; struct rq *rq = this_rq(); bool complete = false; struct rq_flags rf; @@ -2003,19 +2002,15 @@ static int migration_cpu_stop(void *data) if (p->migration_pending == pending) p->migration_pending = NULL; complete = true; - } - if (dest_cpu < 0) { if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) goto out; - - dest_cpu = cpumask_any_distribute(&p->cpus_mask); } if (task_on_rq_queued(p)) - rq = __migrate_task(rq, &rf, p, dest_cpu); + rq = __migrate_task(rq, &rf, p, arg->dest_cpu); else - p->wake_cpu = dest_cpu; + p->wake_cpu = arg->dest_cpu; /* * XXX __migrate_task() can fail, at which point we might end @@ -2294,7 +2289,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag init_completion(&my_pending.done); my_pending.arg = (struct migration_arg) { .task = p, - .dest_cpu = -1, /* any */ + .dest_cpu = dest_cpu, .pending = &my_pending, }; @@ -2302,6 +2297,15 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag } else { pending = p->migration_pending; refcount_inc(&pending->refs); + /* + * Affinity has changed, but we've already installed a + * pending. migration_cpu_stop() *must* see this, else + * we risk a completion of the pending despite having a + * task on a disallowed CPU. + * + * Serialized by p->pi_lock, so this is safe. + */ + pending->arg.dest_cpu = dest_cpu; } } pending = p->migration_pending; -- Gitee From 06ec24df2bb5892ea32004cf2cceba74c88ddfea Mon Sep 17 00:00:00 2001 From: "yang.yang29@zte.com.cn" Date: Mon, 12 Jun 2023 09:50:37 +0800 Subject: [PATCH 8/9] sched: Fix get_push_task() vs migrate_disable() commit 2be7c9e425a6a13a25949b0bcd1f1a6b8a07d66d upstream. push_rt_task() attempts to move the currently running task away if the next runnable task has migration disabled and therefore is pinned on the current CPU. The current task is retrieved via get_push_task() which only checks for nr_cpus_allowed == 1, but does not check whether the task has migration disabled and therefore cannot be moved either. The consequence is a pointless invocation of the migration thread which correctly observes that the task cannot be moved. Return NULL if the task has migration disabled and cannot be moved to another CPU. 
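The resulting candidate test is small enough to sketch in user-space C. struct task and can_push() below are invented stand-ins for illustration, not the kernel's task_struct or get_push_task():

#include <stdbool.h>
#include <stdio.h>

struct task {
	int nr_cpus_allowed;
	bool migration_disabled;
};

static bool can_push(const struct task *p)
{
	if (p->nr_cpus_allowed == 1)
		return false;		/* affinity-pinned: nowhere to push it */
	if (p->migration_disabled)
		return false;		/* migrate_disable()d: cannot be moved either */
	return true;
}

int main(void)
{
	struct task pinned  = { .nr_cpus_allowed = 4, .migration_disabled = true };
	struct task movable = { .nr_cpus_allowed = 4, .migration_disabled = false };

	printf("pinned: %d, movable: %d\n", can_push(&pinned), can_push(&movable));
	return 0;
}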
Cc: stable-rt@vger.kernel.org Fixes: a7c81556ec4d3 ("sched: Fix migrate_disable() vs rt/dl balancing") Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20210826133738.yiotqbtdaxzjsnfj@linutronix.de Signed-off-by: Steven Rostedt (VMware) --- kernel/sched/sched.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b48fa5bef758..1dc4da1d6d54 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2182,6 +2182,9 @@ static inline struct task_struct *get_push_task(struct rq *rq) if (p->nr_cpus_allowed == 1) return NULL; + if (p->migration_disabled) + return NULL; + rq->push_busy = true; return get_task_struct(p); } -- Gitee From 2a61a3673643e22c8d5e66be1a3f722f4e2e06d9 Mon Sep 17 00:00:00 2001 From: "yang.yang29@zte.com.cn" Date: Mon, 12 Jun 2023 09:50:51 +0800 Subject: [PATCH 9/9] sched: Switch wait_task_inactive to HRTIMER_MODE_REL_HARD commit 7893a5f97789761923ff5fb84eeb1e1a8bc5e580 upstream. With PREEMPT_RT enabled, all hrtimer callbacks will be invoked in softirq mode unless they are explicitly marked as HRTIMER_MODE_HARD. During boot kthread_bind() is used for the creation of per-CPU threads and then hangs in wait_task_inactive() if the ksoftirqd is not yet up and running. The hang disappeared since commit 26c7295be0c5e ("kthread: Do not preempt current task if it is going to call schedule()") but enabling function trace on boot reliably leads to the freeze on boot behaviour again. The timer in wait_task_inactive() can not be directly used by a user interface to abuse it and create a mass wake of several tasks at the same time, which would lead to long sections with disabled interrupts. Therefore it is safe to make the timer HRTIMER_MODE_REL_HARD. Switch the timer to HRTIMER_MODE_REL_HARD. Cc: stable-rt@vger.kernel.org Link: https://lkml.kernel.org/r/20210826170408.vm7rlj7odslshwch@linutronix.de Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Steven Rostedt (VMware) --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1ed21442efa9..5f7040003713 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2752,7 +2752,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) ktime_t to = NSEC_PER_SEC / HZ; set_current_state(TASK_UNINTERRUPTIBLE); - schedule_hrtimeout(&to, HRTIMER_MODE_REL); + schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD); continue; } -- Gitee