diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 471b2129ea84341f764fa37ad58ed6eab4e1111c..46a0df7d1047d7d28620c806a6c8e400cf79b5fc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1751,6 +1751,8 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
 #ifdef CONFIG_SCHED_WALT
 	double_lock_balance(rq, cpu_rq(new_cpu));
+	if (!(rq->clock_update_flags & RQCF_UPDATED))
+		update_rq_clock(rq);
 #endif
 	set_task_cpu(p, new_cpu);
 #ifdef CONFIG_SCHED_WALT
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2c0781ce163f6e93ebcd3caf9c35ad34c1601a89..dafc7d8d9c8fa611ac78fdda3ae059b8ca5fecbd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4984,7 +4984,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/* At this point se is NULL and we are at root level*/
 	add_nr_running(rq, task_delta);
-	walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq);
+	walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq);
 
 unthrottle_throttle:
 	/*
@@ -5616,13 +5616,12 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		walt_inc_cfs_rq_stats(cfs_rq, p);
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
 			goto enqueue_throttle;
 
-		walt_inc_cfs_rq_stats(cfs_rq, p);
-
 		flags = ENQUEUE_WAKEUP;
 	}
 
@@ -5713,13 +5712,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running--;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		walt_dec_cfs_rq_stats(cfs_rq, p);
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
 			goto dequeue_throttle;
 
-		walt_dec_cfs_rq_stats(cfs_rq, p);
-
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
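Note on the fair.c hunks: the enqueue/dequeue changes move the WALT per-cfs_rq accounting above the throttled check, so a cfs_rq that terminates the hierarchy walk still records the task; the unthrottle_cfs_rq() hunk then folds the throttled group's stats into the root rq (at that point se is NULL, so rq->walt_stats is the right target rather than the group's own walt_stats). The core.c hunk guards against a stale rq clock: the task was dequeued with DEQUEUE_NOCLOCK, so under CONFIG_SCHED_WALT the clock is refreshed, if nothing has updated it yet, before set_task_cpu() runs. Below is a small, self-contained user-space model of the ordering fix only; the struct and function names are invented stand-ins for the kernel's cfs_rq/WALT bookkeeping, not the real API.

/*
 * Toy model of the stats-ordering fix above. A task is enqueued up a
 * group hierarchy; the walk stops at a throttled level. With the old
 * ordering the throttled level's counter is never bumped, so its
 * aggregate is wrong when it is later unthrottled and folded into the
 * root. With the new ordering the counter is bumped first.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_cfs_rq {
	int walt_nr_tasks;		/* stand-in for walt_stats contents */
	bool throttled;
	struct toy_cfs_rq *parent;
};

/* Old ordering: stop at a throttled level before accounting. */
static void enqueue_old(struct toy_cfs_rq *cfs_rq)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		if (cfs_rq->throttled)
			return;		/* counter never bumped here */
		cfs_rq->walt_nr_tasks++;
	}
}

/* New ordering: account first, then stop at a throttled level. */
static void enqueue_new(struct toy_cfs_rq *cfs_rq)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->walt_nr_tasks++;
		if (cfs_rq->throttled)
			return;
	}
}

int main(void)
{
	struct toy_cfs_rq root  = { 0, false, NULL };
	struct toy_cfs_rq group = { 0, true,  &root };	/* throttled */

	enqueue_old(&group);
	printf("old ordering: group=%d (task lost)\n", group.walt_nr_tasks);

	group.walt_nr_tasks = 0;
	enqueue_new(&group);
	printf("new ordering: group=%d (correct at unthrottle)\n",
	       group.walt_nr_tasks);
	return 0;
}

Compiled and run, this prints group=0 for the old ordering and group=1 for the new one; in the real code that difference is what unthrottle_cfs_rq() later propagates to rq->walt_stats.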