From 664599f95f8edff277f74ee7b06fa879e1e5aaf3 Mon Sep 17 00:00:00 2001
From: Wang Shuo
Date: Fri, 22 Jul 2022 15:24:27 +0800
Subject: [PATCH 1/2] sched: Add latency_nice priority

linux inclusion
category: feature
issue: I5IIOZ
CVE: NA

Signed-off-by: Wang Shuo

-----------------------------------------------

This patchset restarts the work on adding a latency nice priority to
describe the latency tolerance of cfs tasks. See more at:
https://lore.kernel.org/all/20220512163534.2572-1-vincent.guittot@linaro.org/

Signed-off-by: Parth Shah
Signed-off-by: Vincent Guittot
Signed-off-by: Wang Shuo
---
 include/linux/sched.h            |  3 ++
 include/uapi/linux/sched.h       |  4 +-
 include/uapi/linux/sched/types.h | 19 +++++++++
 init/init_task.c                 |  1 +
 kernel/sched/core.c              | 55 ++++++++++++++++++++++++++
 kernel/sched/debug.c             |  1 +
 kernel/sched/fair.c              | 68 +++++++++++++++++++++++++++++++-
 kernel/sched/sched.h             | 30 ++++++++++++++
 tools/include/uapi/linux/sched.h |  4 +-
 9 files changed, 181 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6ae4d7ae5a3b..439952aa49b8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -538,6 +538,8 @@ struct sched_entity {
 	unsigned long			runnable_weight;
 #endif
 
+	int				latency_weight;
+
 #ifdef CONFIG_SMP
 	/*
 	 * Per entity load average tracking.
@@ -797,6 +799,7 @@ struct task_struct {
 	int				static_prio;
 	int				normal_prio;
 	unsigned int			rt_priority;
+	int				latency_prio;
 
 	const struct sched_class	*sched_class;
 	struct sched_entity		se;
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 3bac0a8ceab2..b2e932c25be6 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -132,6 +132,7 @@ struct clone_args {
 #define SCHED_FLAG_KEEP_PARAMS		0x10
 #define SCHED_FLAG_UTIL_CLAMP_MIN	0x20
 #define SCHED_FLAG_UTIL_CLAMP_MAX	0x40
+#define SCHED_FLAG_LATENCY_NICE		0x80
 
 #define SCHED_FLAG_KEEP_ALL	(SCHED_FLAG_KEEP_POLICY | \
 				 SCHED_FLAG_KEEP_PARAMS)
@@ -143,6 +144,7 @@ struct clone_args {
 			 SCHED_FLAG_RECLAIM		| \
 			 SCHED_FLAG_DL_OVERRUN		| \
 			 SCHED_FLAG_KEEP_ALL		| \
-			 SCHED_FLAG_UTIL_CLAMP)
+			 SCHED_FLAG_UTIL_CLAMP		| \
+			 SCHED_FLAG_LATENCY_NICE)
 
 #endif /* _UAPI_LINUX_SCHED_H */
diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h
index c852153ddb0d..3eedbeba2f32 100644
--- a/include/uapi/linux/sched/types.h
+++ b/include/uapi/linux/sched/types.h
@@ -10,6 +10,7 @@ struct sched_param {
 
 #define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
 #define SCHED_ATTR_SIZE_VER1	56	/* add: util_{min,max} */
+#define SCHED_ATTR_SIZE_VER2	60	/* add: latency_nice */
 
 /*
  * Extended scheduling parameters data structure.
@@ -96,6 +97,22 @@ struct sched_param {
  * on a CPU with a capacity big enough to fit the specified value.
 * A task with a max utilization value smaller than 1024 is more likely
 * scheduled on a CPU with no more capacity than the specified value.
+ *
+ * Latency Tolerance Attributes
+ * ===========================
+ *
+ * A subset of sched_attr attributes allows specifying the relative latency
+ * requirements of a task with respect to the other tasks running/queued in
+ * the system.
+ *
+ * @ sched_latency_nice	task's latency_nice value
+ *
+ * The latency_nice of a task can have any value in a range of
+ * [MIN_LATENCY_NICE..MAX_LATENCY_NICE].
+ *
+ * A task with latency_nice set to MIN_LATENCY_NICE can be taken to
+ * require a lower latency, as opposed to a task with a higher
+ * latency_nice value.
 */
 struct sched_attr {
 	__u32 size;
@@ -118,6 +135,8 @@ struct sched_attr {
 	__u32 sched_util_min;
 	__u32 sched_util_max;
 
+	/* latency requirement hints */
+	__s32 sched_latency_nice;
 };
 
 #endif /* _UAPI_LINUX_SCHED_TYPES_H */
diff --git a/init/init_task.c b/init/init_task.c
index 5fa18ed59d33..a6d8bb9ae2a0 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -78,6 +78,7 @@ struct task_struct init_task
 	.prio		= MAX_PRIO - 20,
 	.static_prio	= MAX_PRIO - 20,
 	.normal_prio	= MAX_PRIO - 20,
+	.latency_prio	= NICE_WIDTH - 20,
 	.policy		= SCHED_NORMAL,
 	.cpus_ptr	= &init_task.cpus_mask,
 	.cpus_mask	= CPU_MASK_ALL,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 549334102718..ae2ff80deff8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -873,6 +873,11 @@ static void set_load_weight(struct task_struct *p, bool update_load)
 	}
 }
 
+static void set_latency_weight(struct task_struct *p)
+{
+	p->se.latency_weight = sched_latency_to_weight[p->latency_prio];
+}
+
 #ifdef CONFIG_UCLAMP_TASK
 /*
  * Serializes updates of utilization clamp values
@@ -3346,6 +3351,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	 */
 	p->prio = current->normal_prio;
 
+	/* Propagate the parent's latency requirements to the child as well */
+	p->latency_prio = current->latency_prio;
+
 	uclamp_fork(p);
 
 	/*
@@ -3369,6 +3377,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 		p->prio = p->normal_prio = p->static_prio;
 		set_load_weight(p, false);
 
+		p->latency_prio = NICE_TO_LATENCY(0);
+		set_latency_weight(p);
+
 		/*
 		 * We don't need the reset flag anymore after the fork. It has
 		 * fulfilled its duty:
@@ -5315,6 +5326,16 @@ static void __setscheduler_params(struct task_struct *p,
 	p->rt_priority = attr->sched_priority;
 	p->normal_prio = normal_prio(p);
 	set_load_weight(p, true);
+
+}
+
+static void __setscheduler_latency(struct task_struct *p,
+				   const struct sched_attr *attr)
+{
+	if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
+		p->latency_prio = NICE_TO_LATENCY(attr->sched_latency_nice);
+		set_latency_weight(p);
+	}
 }
 
 /*
@@ -5441,6 +5462,17 @@ static int __sched_setscheduler(struct task_struct *p,
 			return retval;
 	}
 
+	if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
+		if (attr->sched_latency_nice > MAX_LATENCY_NICE)
+			return -EINVAL;
+		if (attr->sched_latency_nice < MIN_LATENCY_NICE)
+			return -EINVAL;
+		/* Use the same security checks as NICE */
+		if (attr->sched_latency_nice < LATENCY_TO_NICE(p->latency_prio) &&
+		    !capable(CAP_SYS_NICE))
+			return -EPERM;
+	}
+
 	if (pi)
 		cpuset_read_lock();
@@ -5475,6 +5507,9 @@ static int __sched_setscheduler(struct task_struct *p,
 			goto change;
 		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
 			goto change;
+		if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE &&
+		    attr->sched_latency_nice != LATENCY_TO_NICE(p->latency_prio))
+			goto change;
 
 		p->sched_reset_on_fork = reset_on_fork;
 		retval = 0;
@@ -5563,6 +5598,7 @@ static int __sched_setscheduler(struct task_struct *p,
 		__setscheduler_params(p, attr);
 		__setscheduler_prio(p, newprio);
 	}
+	__setscheduler_latency(p, attr);
 	__setscheduler_uclamp(p, attr);
 
 	if (queued) {
@@ -5771,6 +5807,9 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
 	    size < SCHED_ATTR_SIZE_VER1)
 		return -EINVAL;
 
+	if ((attr->sched_flags & SCHED_FLAG_LATENCY_NICE) &&
+	    size < SCHED_ATTR_SIZE_VER2)
+		return -EINVAL;
 	/*
 	 * XXX: Do we want to be lenient like existing syscalls; or do we want
 	 * to be strict and return an error on out-of-bounds values?
@@ -6000,6 +6039,8 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	else
 		kattr.sched_nice = task_nice(p);
 
+	kattr.sched_latency_nice = LATENCY_TO_NICE(p->latency_prio);
+
 #ifdef CONFIG_UCLAMP_TASK
 	/*
 	 * This could race with another potential updater, but this is fine
@@ -9048,6 +9089,20 @@ const u32 sched_prio_to_wmult[40] = {
  /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
+/*
+ * latency weight for wakeup preemption
+ */
+const int sched_latency_to_weight[40] = {
+ /* -20 */      1024,      973,      922,      870,      819,
+ /* -15 */       768,      717,      666,      614,      563,
+ /* -10 */       512,      461,      410,      358,      307,
+ /*  -5 */       256,      205,      154,      102,       51,
+ /*   0 */         0,      -51,     -102,     -154,     -205,
+ /*   5 */      -256,     -307,     -358,     -410,     -461,
+ /*  10 */      -512,     -563,     -614,     -666,     -717,
+ /*  15 */      -768,     -819,     -870,     -922,     -973,
+};
+
 void call_trace_sched_update_nr_running(struct rq *rq, int count)
 {
 	trace_sched_update_nr_running_tp(rq, count);
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e5af311230be..e33b837ded9c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1046,6 +1046,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 #endif
 	P(policy);
 	P(prio);
+	P(latency_prio);
 	if (task_has_dl_policy(p)) {
 		P(dl.runtime);
 		P(dl.deadline);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dafc7d8d9c8f..39882e5c0847 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5579,6 +5579,35 @@ static int sched_idle_cpu(int cpu)
 }
 #endif
 
+static void set_next_buddy(struct sched_entity *se);
+
+static void check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se)
+{
+	struct sched_entity *next;
+
+	if (se->latency_weight <= 0)
+		return;
+
+	if (cfs->nr_running <= 1)
+		return;
+	/*
+	 * When waking from idle, we don't need to check whether to preempt
+	 * the idle thread at wakeup, and we don't set next buddy as a
+	 * candidate for being picked in priority.
+	 * In case of a simultaneous wakeup from idle, latency-sensitive
+	 * tasks would otherwise lose the opportunity to preempt the
+	 * non-sensitive tasks that woke up at the same time.
+	 */
+
+	if (cfs->next)
+		next = cfs->next;
+	else
+		next = __pick_first_entity(cfs);
+
+	if (next && wakeup_preempt_entity(next, se) == 1)
+		set_next_buddy(se);
+}
+
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
@@ -5668,6 +5697,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!task_new)
 		update_overutilized_status(rq);
 
+	if (rq->curr == rq->idle)
+		check_preempt_from_idle(cfs_rq_of(&p->se), &p->se);
+
 enqueue_throttle:
 	if (cfs_bandwidth_used()) {
 		/*
@@ -5689,8 +5721,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	hrtick_update(rq);
 }
 
-static void set_next_buddy(struct sched_entity *se);
-
 /*
  * The dequeue_task method is called before nr_running is
  * decreased. We remove the task from the rbtree and
@@ -7012,6 +7042,37 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 }
 #endif /* CONFIG_SMP */
 
+static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se)
+{
+	int latency_weight = se->latency_weight;
+	long thresh = sysctl_sched_latency;
+
+	/*
+	 * A positive latency weight means that the sched_entity has a latency
+	 * requirement that needs to be evaluated against other entities.
+	 * Otherwise, use the latency weight to evaluate how much scheduling
+	 * delay is acceptable by se.
+	 */
+	if ((se->latency_weight > 0) || (curr->latency_weight > 0))
+		latency_weight -= curr->latency_weight;
+
+	if (!latency_weight)
+		return 0;
+
+	if (sched_feat(GENTLE_FAIR_SLEEPERS))
+		thresh >>= 1;
+
+	/*
+	 * Clamp the delta to stay in the scheduler period range
+	 * [-sysctl_sched_latency:sysctl_sched_latency]
+	 */
+	latency_weight = clamp_t(long, latency_weight,
+				 -1 * NICE_LATENCY_WEIGHT_MAX,
+				 NICE_LATENCY_WEIGHT_MAX);
+
+	return (thresh * latency_weight) >> NICE_LATENCY_SHIFT;
+}
+
 static unsigned long wakeup_gran(struct sched_entity *se)
 {
 	unsigned long gran = sysctl_sched_wakeup_granularity;
@@ -7051,6 +7112,9 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 {
 	s64 gran, vdiff = curr->vruntime - se->vruntime;
 
+	/* Take into account latency priority */
+	vdiff += wakeup_latency_gran(curr, se);
+
 	if (vdiff <= 0)
 		return -1;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e4c65d96185e..6dca085961bb 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -154,6 +154,35 @@ extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
  */
 #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
 
+/*
+ * Latency nice is meant to provide scheduler hints about the relative
+ * latency requirements of a task with respect to other tasks.
+ * Thus a task with latency_nice == 19 can be hinted as having no
+ * latency requirements, in contrast to a task with latency_nice == -20
+ * which should be given priority in terms of lower latency.
+ */
+#define MAX_LATENCY_NICE	19
+#define MIN_LATENCY_NICE	-20
+
+#define LATENCY_NICE_WIDTH	\
+	(MAX_LATENCY_NICE - MIN_LATENCY_NICE + 1)
+
+/*
+ * Default tasks should be treated as a task with latency_nice = 0.
+ */
+#define DEFAULT_LATENCY_NICE	0
+#define DEFAULT_LATENCY_PRIO	(DEFAULT_LATENCY_NICE + LATENCY_NICE_WIDTH/2)
+
+/*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static latency [ 0..39 ],
+ * and back.
+ */
+#define NICE_TO_LATENCY(nice)	((nice) + DEFAULT_LATENCY_PRIO)
+#define LATENCY_TO_NICE(prio)	((prio) - DEFAULT_LATENCY_PRIO)
+#define NICE_LATENCY_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
+#define NICE_LATENCY_WEIGHT_MAX	(1L << NICE_LATENCY_SHIFT)
+
 /*
  * Increase resolution of nice-level calculations for 64-bit architectures.
  * The extra resolution improves shares distribution and load balancing of
@@ -1841,6 +1870,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 
 extern const int sched_prio_to_weight[40];
 extern const u32 sched_prio_to_wmult[40];
+extern const int sched_latency_to_weight[40];
 
 /*
  * {de,en}queue flags:
diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h
index 3bac0a8ceab2..ecc4884bfe4b 100644
--- a/tools/include/uapi/linux/sched.h
+++ b/tools/include/uapi/linux/sched.h
@@ -132,6 +132,7 @@ struct clone_args {
 #define SCHED_FLAG_KEEP_PARAMS		0x10
 #define SCHED_FLAG_UTIL_CLAMP_MIN	0x20
 #define SCHED_FLAG_UTIL_CLAMP_MAX	0x40
+#define SCHED_FLAG_LATENCY_NICE		0x80
 
 #define SCHED_FLAG_KEEP_ALL	(SCHED_FLAG_KEEP_POLICY | \
 				 SCHED_FLAG_KEEP_PARAMS)
@@ -143,6 +144,7 @@ struct clone_args {
 			 SCHED_FLAG_RECLAIM		| \
 			 SCHED_FLAG_DL_OVERRUN		| \
 			 SCHED_FLAG_KEEP_ALL		| \
-			 SCHED_FLAG_UTIL_CLAMP)
+			 SCHED_FLAG_UTIL_CLAMP		| \
+			 SCHED_FLAG_LATENCY_NICE)
 
 #endif /* _UAPI_LINUX_SCHED_H */
-- 
Gitee

From c8599be2ad1b25c02c2728744b36e8775e3193ae Mon Sep 17 00:00:00 2001
From: Wang Shuo
Date: Fri, 22 Jul 2022 17:19:19 +0800
Subject: [PATCH 2/2] sched: Add CONFIG_SCHED_LATENCY_NICE for latency_nice feature

ohos inclusion
category: feature
issue: I5IIOZ
CVE: NA

Signed-off-by: Wang Shuo
---
 include/linux/sched.h |  4 +++
 init/Kconfig          |  7 +++++
 init/init_task.c      |  2 ++
 kernel/sched/core.c   | 71 ++++++++++++++++++++++++++++++++-----------
 kernel/sched/debug.c  |  2 ++
 kernel/sched/fair.c   |  8 +++++
 kernel/sched/sched.h  |  4 +++
 7 files changed, 80 insertions(+), 18 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 439952aa49b8..3dca22f2a018 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -538,7 +538,9 @@ struct sched_entity {
 	unsigned long			runnable_weight;
 #endif
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 	int				latency_weight;
+#endif
 
 #ifdef CONFIG_SMP
 	/*
@@ -799,7 +801,9 @@ struct task_struct {
 	int				static_prio;
 	int				normal_prio;
 	unsigned int			rt_priority;
+#ifdef CONFIG_SCHED_LATENCY_NICE
 	int				latency_prio;
+#endif
 
 	const struct sched_class	*sched_class;
 	struct sched_entity		se;
diff --git a/init/Kconfig b/init/Kconfig
index 2e5b9288081e..470982f54ffd 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -858,6 +858,13 @@ config UCLAMP_BUCKETS_COUNT
 
 	  If in doubt, use the default value.
 
+config SCHED_LATENCY_NICE
+	bool "Enable latency feature for FAIR tasks"
+	default n
+	help
+	  This feature uses the latency nice priority to decide whether a cfs
+	  task can preempt the currently running task.
+
 source "kernel/sched/rtg/Kconfig"
 
 config SCHED_EAS
diff --git a/init/init_task.c b/init/init_task.c
index a6d8bb9ae2a0..65e4a3432c6e 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -78,7 +78,9 @@ struct task_struct init_task
 	.prio		= MAX_PRIO - 20,
 	.static_prio	= MAX_PRIO - 20,
 	.normal_prio	= MAX_PRIO - 20,
+#ifdef CONFIG_SCHED_LATENCY_NICE
 	.latency_prio	= NICE_WIDTH - 20,
+#endif
 	.policy		= SCHED_NORMAL,
 	.cpus_ptr	= &init_task.cpus_mask,
 	.cpus_mask	= CPU_MASK_ALL,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ae2ff80deff8..fc2cefc69eff 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -873,11 +873,49 @@ static void set_load_weight(struct task_struct *p, bool update_load)
 	}
 }
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 static void set_latency_weight(struct task_struct *p)
 {
 	p->se.latency_weight = sched_latency_to_weight[p->latency_prio];
 }
 
+static void __setscheduler_latency(struct task_struct *p,
+				   const struct sched_attr *attr)
+{
+	if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
+		p->latency_prio = NICE_TO_LATENCY(attr->sched_latency_nice);
+		set_latency_weight(p);
+	}
+}
+
+static int latency_nice_validate(struct task_struct *p, bool user,
+				 const struct sched_attr *attr)
+{
+	if (attr->sched_latency_nice > MAX_LATENCY_NICE)
+		return -EINVAL;
+	if (attr->sched_latency_nice < MIN_LATENCY_NICE)
+		return -EINVAL;
+	/* Use the same security checks as NICE */
+	if (user && attr->sched_latency_nice < LATENCY_TO_NICE(p->latency_prio)
+	    && !capable(CAP_SYS_NICE))
+		return -EPERM;
+
+	return 0;
+}
+#else
+static void
+__setscheduler_latency(struct task_struct *p, const struct sched_attr *attr)
+{
+}
+
+static inline
+int latency_nice_validate(struct task_struct *p, bool user,
+			  const struct sched_attr *attr)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
 #ifdef CONFIG_UCLAMP_TASK
 /*
  * Serializes updates of utilization clamp values
@@ -3351,8 +3389,10 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	 */
 	p->prio = current->normal_prio;
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 	/* Propagate the parent's latency requirements to the child as well */
 	p->latency_prio = current->latency_prio;
+#endif
 
 	uclamp_fork(p);
@@ -3377,8 +3417,10 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 		p->prio = p->normal_prio = p->static_prio;
 		set_load_weight(p, false);
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 		p->latency_prio = NICE_TO_LATENCY(0);
 		set_latency_weight(p);
+#endif
 
 		/*
 		 * We don't need the reset flag anymore after the fork. It has
@@ -5326,16 +5368,6 @@ static void __setscheduler_params(struct task_struct *p,
 	p->rt_priority = attr->sched_priority;
 	p->normal_prio = normal_prio(p);
 	set_load_weight(p, true);
-
-}
-
-static void __setscheduler_latency(struct task_struct *p,
-				   const struct sched_attr *attr)
-{
-	if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
-		p->latency_prio = NICE_TO_LATENCY(attr->sched_latency_nice);
-		set_latency_weight(p);
-	}
 }
 
 /*
@@ -5463,14 +5495,9 @@ static int __sched_setscheduler(struct task_struct *p,
 	}
 
 	if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
-		if (attr->sched_latency_nice > MAX_LATENCY_NICE)
-			return -EINVAL;
-		if (attr->sched_latency_nice < MIN_LATENCY_NICE)
-			return -EINVAL;
-		/* Use the same security checks as NICE */
-		if (attr->sched_latency_nice < LATENCY_TO_NICE(p->latency_prio) &&
-		    !capable(CAP_SYS_NICE))
-			return -EPERM;
+		retval = latency_nice_validate(p, user, attr);
+		if (retval)
+			return retval;
 	}
 
 	if (pi)
@@ -5507,9 +5534,11 @@ static int __sched_setscheduler(struct task_struct *p,
 			goto change;
 		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
 			goto change;
+#ifdef CONFIG_SCHED_LATENCY_NICE
 		if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE &&
 		    attr->sched_latency_nice != LATENCY_TO_NICE(p->latency_prio))
 			goto change;
+#endif
 
 		p->sched_reset_on_fork = reset_on_fork;
 		retval = 0;
@@ -5807,9 +5836,11 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
 	    size < SCHED_ATTR_SIZE_VER1)
 		return -EINVAL;
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 	if ((attr->sched_flags & SCHED_FLAG_LATENCY_NICE) &&
 	    size < SCHED_ATTR_SIZE_VER2)
 		return -EINVAL;
+#endif
 	/*
 	 * XXX: Do we want to be lenient like existing syscalls; or do we want
 	 * to be strict and return an error on out-of-bounds values?
@@ -6039,7 +6070,9 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	else
 		kattr.sched_nice = task_nice(p);
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 	kattr.sched_latency_nice = LATENCY_TO_NICE(p->latency_prio);
+#endif
 
 #ifdef CONFIG_UCLAMP_TASK
 	/*
@@ -9089,6 +9122,7 @@ const u32 sched_prio_to_wmult[40] = {
  /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 /*
  * latency weight for wakeup preemption
  */
@@ -9102,6 +9136,7 @@ const int sched_latency_to_weight[40] = {
  /*  10 */      -512,     -563,     -614,     -666,     -717,
  /*  15 */      -768,     -819,     -870,     -922,     -973,
 };
+#endif
 
 void call_trace_sched_update_nr_running(struct rq *rq, int count)
 {
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e33b837ded9c..0268a72c1b85 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1046,7 +1046,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 #endif
 	P(policy);
 	P(prio);
+#ifdef CONFIG_SCHED_LATENCY_NICE
 	P(latency_prio);
+#endif
 	if (task_has_dl_policy(p)) {
 		P(dl.runtime);
 		P(dl.deadline);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 39882e5c0847..d964070fda71 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5581,6 +5581,7 @@ static int sched_idle_cpu(int cpu)
 
 static void set_next_buddy(struct sched_entity *se);
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 static void check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se)
 {
 	struct sched_entity *next;
@@ -5607,6 +5608,7 @@ static void check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se)
 	if (next && wakeup_preempt_entity(next, se) == 1)
 		set_next_buddy(se);
 }
+#endif
 
 /*
  * The enqueue_task method is called before nr_running is
@@ -5697,8 +5699,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!task_new)
 		update_overutilized_status(rq);
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 	if (rq->curr == rq->idle)
 		check_preempt_from_idle(cfs_rq_of(&p->se), &p->se);
+#endif
 
 enqueue_throttle:
 	if (cfs_bandwidth_used()) {
@@ -7042,6 +7046,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se)
 {
 	int latency_weight = se->latency_weight;
@@ -7072,6 +7077,7 @@ static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *
 
 	return (thresh * latency_weight) >> NICE_LATENCY_SHIFT;
 }
+#endif
 
 static unsigned long wakeup_gran(struct sched_entity *se)
 {
@@ -7112,8 +7118,10 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 {
 	s64 gran, vdiff = curr->vruntime - se->vruntime;
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 	/* Take into account latency priority */
 	vdiff += wakeup_latency_gran(curr, se);
+#endif
 
 	if (vdiff <= 0)
 		return -1;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6dca085961bb..592c8653c153 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -154,6 +154,7 @@ extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
  */
 #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
 
+#ifdef CONFIG_SCHED_LATENCY_NICE
 /*
  * Latency nice is meant to provide scheduler hints about the relative
  * latency requirements of a task with respect to other tasks.
@@ -182,6 +183,7 @@ extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
 #define LATENCY_TO_NICE(prio)	((prio) - DEFAULT_LATENCY_PRIO)
 #define NICE_LATENCY_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
 #define NICE_LATENCY_WEIGHT_MAX	(1L << NICE_LATENCY_SHIFT)
+#endif /* CONFIG_SCHED_LATENCY_NICE */
 
 /*
  * Increase resolution of nice-level calculations for 64-bit architectures.
@@ -1870,7 +1872,9 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 
 extern const int sched_prio_to_weight[40];
 extern const u32 sched_prio_to_wmult[40];
+#ifdef CONFIG_SCHED_LATENCY_NICE
 extern const int sched_latency_to_weight[40];
+#endif
 
 /*
  * {de,en}queue flags:
-- 
Gitee
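
For reference, the sketch below shows how the new interface could be exercised
from userspace; it is not part of either patch. It assumes a kernel built with
CONFIG_SCHED_LATENCY_NICE, and the local struct definition, flag value, and
size handling simply mirror the patched UAPI headers above. glibc provides no
wrapper for sched_setattr(), so the raw syscall is used.

/*
 * Hypothetical usage sketch -- not part of the patch series above.
 * Sets latency_nice = -20 on the calling task via sched_setattr(2),
 * assuming a kernel built with CONFIG_SCHED_LATENCY_NICE.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SCHED_FLAG_LATENCY_NICE	0x80	/* from the patched uapi/linux/sched.h */

struct sched_attr_ver2 {		/* SCHED_ATTR_SIZE_VER2 layout */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* SCHED_DEADLINE fields, unused here */
	uint64_t sched_deadline;
	uint64_t sched_period;
	uint32_t sched_util_min;	/* utilization clamp fields, unused here */
	uint32_t sched_util_max;
	int32_t  sched_latency_nice;	/* new in VER2 */
};

int main(void)
{
	struct sched_attr_ver2 attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);		/* >= SCHED_ATTR_SIZE_VER2 */
	attr.sched_policy = 0;			/* SCHED_NORMAL */
	attr.sched_flags = SCHED_FLAG_LATENCY_NICE;
	attr.sched_latency_nice = -20;		/* most latency sensitive */

	/* pid 0 means the calling task; the last argument is flags (unused) */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0) {
		perror("sched_setattr");
		return 1;
	}

	printf("latency_nice set to %d\n", attr.sched_latency_nice);
	return 0;
}

Per the checks added in __sched_setscheduler()/latency_nice_validate(),
lowering latency_nice below the task's current value requires CAP_SYS_NICE,
so an unprivileged run of this sketch is expected to fail with EPERM; on a
kernel without these patches the unknown flag is rejected with EINVAL.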