From 652a88a36b928cd0cb073df5bed0f95bf251437e Mon Sep 17 00:00:00 2001
From: Hu Zhaodong
Date: Tue, 8 Mar 2022 19:34:37 +0800
Subject: [PATCH 1/2] sched: fair: fix WALT load statistic for cfs_bandwidth
 enable

ohos inclusion
category: bugfix
issue: #I4WP5B
CVE: NA

Signed-off-by: Hu Zhaodong

-------------------------------------------

While enqueueing a sched_entity to a cfs_rq, walt_inc_cfs_rq_stats()
should be called alongside cfs_rq->h_nr_running++, no matter whether
the cfs_rq is throttled, so that the cfs_rq perceives the attached
task. The same applies to the dequeue routine.

Signed-off-by: Li Ming
---
 kernel/sched/fair.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2c0781ce163f..dafc7d8d9c8f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4984,7 +4984,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/* At this point se is NULL and we are at root level*/
 	add_nr_running(rq, task_delta);
-	walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq);
+	walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq);
 
 unthrottle_throttle:
 	/*
@@ -5616,13 +5616,12 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		walt_inc_cfs_rq_stats(cfs_rq, p);
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
 			goto enqueue_throttle;
 
-		walt_inc_cfs_rq_stats(cfs_rq, p);
-
 		flags = ENQUEUE_WAKEUP;
 	}
 
@@ -5713,13 +5712,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running--;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		walt_dec_cfs_rq_stats(cfs_rq, p);
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
 			goto dequeue_throttle;
 
-		walt_dec_cfs_rq_stats(cfs_rq, p);
-
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
-- 
Gitee
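
[ A minimal sketch of the invariant the patch above restores; it is not
  part of the applied diff. The loop is condensed from
  enqueue_task_fair(), and walt_inc_cfs_rq_stats() is the WALT
  accounting hook assumed by this tree. ]

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);

		cfs_rq->h_nr_running++;
		/* Keep the WALT stats paired with h_nr_running. */
		walt_inc_cfs_rq_stats(cfs_rq, p);

		/*
		 * If the hook sat below this check, a throttled cfs_rq
		 * would reach enqueue_throttle with h_nr_running already
		 * incremented but its WALT stats left behind.
		 */
		if (cfs_rq_throttled(cfs_rq))
			goto enqueue_throttle;
	}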
From 671893a0f6ca835294dff921212ff82eb7bd12b3 Mon Sep 17 00:00:00 2001
From: Lingutla Chandrasekhar
Date: Fri, 11 Jan 2019 12:32:05 +0530
Subject: [PATCH 2/2] sched: core: Fix stale rq clock usage in migration path

codeaurora inclusion
category: bugfix
issue: #I4WP5B
CVE: NA

Signed-off-by: Hu Zhaodong

-------------------------------------------

While migrating a task, move_queued_task() updates the current CPU's rq
clock (which sets RQCF_UPDATED) with rq->lock held, then momentarily
releases rq->lock and reacquires it along with the new CPU's rq->lock.
In between, if another CPU takes the current rq->lock, calls
rq_pin_lock() (which clears RQCF_UPDATED) and releases the lock without
updating the rq clock, the rq's clock_update_flags stay stale until
rq_pin_lock() is called again. If the migration path then reports load
to the cpufreq governor, it accesses the stale rq clock, and
assert_clock_updated() emits a warning with the call stack below:

 detach_entity_cfs_rq+0x71c/0x780
 migrate_task_rq_fair+0x50/0xd0
 set_task_cpu+0x150/0x238
 move_queued_task+0x1b4/0x3e8
 migration_cpu_stop+0x188/0x1f0
 cpu_stopper_thread+0xac/0x150
 smpboot_thread_fn+0x1c4/0x2e8

As commit 2463f46361a02d ("sched: Fix assert_clock_updated warning
emitted during CPU isolation") mentioned, this warning could also lead
to a deadlock when the console is enabled.

To fix this, while reacquiring the CPU's rq->lock, force an update of
the rq clock if RQCF_UPDATED is not set.

Signed-off-by: Lingutla Chandrasekhar
Signed-off-by: Li Ming
---
 kernel/sched/core.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 471b2129ea84..46a0df7d1047 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1751,6 +1751,8 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
 #ifdef CONFIG_SCHED_WALT
 	double_lock_balance(rq, cpu_rq(new_cpu));
+	if (!(rq->clock_update_flags & RQCF_UPDATED))
+		update_rq_clock(rq);
 #endif
 	set_task_cpu(p, new_cpu);
 #ifdef CONFIG_SCHED_WALT
-- 
Gitee
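
[ A condensed illustration of the race window this patch closes; it is
  not part of the applied diff. The function follows the shape of the
  non-PREEMPT variant of _double_lock_balance() in kernel/sched/sched.h;
  details vary by kernel version and configuration. ]

	static inline int _double_lock_balance(struct rq *this_rq,
					       struct rq *busiest)
	{
		int ret = 0;

		if (unlikely(!raw_spin_trylock(&busiest->lock))) {
			if (busiest < this_rq) {
				/*
				 * this_rq->lock is dropped here. Another
				 * CPU may take it, and its rq_pin_lock()
				 * clears RQCF_UPDATED on this_rq ...
				 */
				raw_spin_unlock(&this_rq->lock);
				raw_spin_lock(&busiest->lock);
				raw_spin_lock_nested(&this_rq->lock,
						     SINGLE_DEPTH_NESTING);
				/*
				 * ... so after relocking, this_rq's clock
				 * can be stale even though
				 * move_queued_task() updated it before the
				 * dequeue. Hence the RQCF_UPDATED re-check
				 * added by the patch above.
				 */
				ret = 1;
			} else
				raw_spin_lock_nested(&busiest->lock,
						     SINGLE_DEPTH_NESTING);
		}
		return ret;
	}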