diff --git a/include/linux/sched/frame_rtg.h b/include/linux/sched/frame_rtg.h
new file mode 100644
index 0000000000000000000000000000000000000000..71ecf65c599e4b469c92ab5fe50c7cc84e581d4f
--- /dev/null
+++ b/include/linux/sched/frame_rtg.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Frame declaration
+ *
+ * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd.
+ */
+
+#ifndef __SCHED_FRAME_RTG_H
+#define __SCHED_FRAME_RTG_H
+
+#ifdef CONFIG_SCHED_RTG_FRAME
+
+#define MAX_TID_NUM 5
+
+struct frame_info {
+	/*
+	 * reuse the rtg load tracking in frame_info
+	 * rtg->curr_window_load -=> the workload of the current frame
+	 * rtg->prev_window_load -=> the workload of the last frame
+	 * rtg->curr_window_exec -=> the threads' runtime in the current frame
+	 * rtg->prev_window_exec -=> the threads' runtime in the last frame
+	 * rtg->prev_window_time -=> the actual time of the last frame
+	 */
+	rwlock_t lock;
+	struct related_thread_group *rtg;
+	int prio;
+	struct task_struct *thread[MAX_TID_NUM];
+	int thread_num;
+	unsigned int frame_rate; // frame rate
+	u64 frame_time;
+	atomic_t curr_rt_thread_num;
+	atomic_t max_rt_thread_num;
+	atomic_t frame_sched_state;
+	atomic_t start_frame_freq;
+	atomic_t frame_state;
+
+	/*
+	 * frame_vload : the urgency level of the current frame.
+	 * max_vload_time : the time (in ms) at which frame_vload reaches
+	 *		    FRAME_MAX_VLOAD; it equals frame_time / NSEC_PER_MSEC
+	 *		    + vload_margin (~2 * frame_time / NSEC_PER_MSEC by default)
+	 *
+	 * The closer the frame gets to its deadline, the more urgent it becomes,
+	 * so frame_vload depends only on the elapsed frame time and grows with it.
+	 */
+	u64 frame_vload;
+	int vload_margin;
+	int max_vload_time;
+
+	u64 frame_util;
+	unsigned long status;
+	unsigned long prev_fake_load_util;
+	unsigned long prev_frame_load_util;
+	unsigned long prev_frame_time;
+	unsigned long prev_frame_exec;
+	unsigned long prev_frame_load;
+	unsigned int frame_min_util;
+	unsigned int frame_max_util;
+	unsigned int prev_min_util;
+	unsigned int prev_max_util;
+	unsigned int frame_boost_min_util;
+
+	bool margin_imme;
+	bool timestamp_skipped;
+};
+
+struct frame_info *rtg_frame_info(int id);
+static inline
+struct related_thread_group *frame_info_rtg(const struct frame_info *frame_info)
+{
+	return frame_info->rtg;
+}
+#endif
+#endif
diff --git a/include/linux/sched/rtg.h b/include/linux/sched/rtg.h
index 735b8ccae7459e7eb1a06e710c8fece51ebad741..ec738f49fd1e95404c04791b0bc75733d32fc9c5 100644
--- a/include/linux/sched/rtg.h
+++ b/include/linux/sched/rtg.h
@@ -47,6 +47,7 @@ struct related_thread_group {
 	unsigned long freq_update_interval; /* in nanoseconds */
 	u64 last_util_update_time;
 	u64 last_freq_update_time;
+	void *private_data;
 };
 
 struct rtg_class {
diff --git a/include/linux/sched/rtg_ctrl.h b/include/linux/sched/rtg_ctrl.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e346ff49fe4147572c3a9062abd19a85ffb9115
--- /dev/null
+++ b/include/linux/sched/rtg_ctrl.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * rtg control interface
+ *
+ * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd.
+ */ + +#ifndef __SCHED_RTG_CTL_H +#define __SCHED_RTG_CTL_H + +#include + +#define SYSTEM_SERVER_UID 1000 +#define MIN_APP_UID 10000 +#define MAX_BOOST_DURATION_MS 5000 + +#define RTG_SCHED_IPC_MAGIC 0XAB + +#define CMD_ID_SET_ENABLE \ + _IOWR(RTG_SCHED_IPC_MAGIC, SET_ENABLE, struct rtg_enable_data) +#define CMD_ID_SET_RTG \ + _IOWR(RTG_SCHED_IPC_MAGIC, SET_RTG, struct rtg_str_data) +#define CMD_ID_SET_CONFIG \ + _IOWR(RTG_SCHED_IPC_MAGIC, SET_CONFIG, struct rtg_str_data) +#define CMD_ID_SET_RTG_ATTR \ + _IOWR(RTG_SCHED_IPC_MAGIC, SET_RTG_ATTR, struct rtg_str_data) +#define CMD_ID_BEGIN_FRAME_FREQ \ + _IOWR(RTG_SCHED_IPC_MAGIC, BEGIN_FRAME_FREQ, struct proc_state_data) +#define CMD_ID_END_FRAME_FREQ \ + _IOWR(RTG_SCHED_IPC_MAGIC, END_FRAME_FREQ, struct proc_state_data) +#define CMD_ID_END_SCENE \ + _IOWR(RTG_SCHED_IPC_MAGIC, END_SCENE, struct proc_state_data) +#define CMD_ID_SET_MIN_UTIL \ + _IOWR(RTG_SCHED_IPC_MAGIC, SET_MIN_UTIL, struct proc_state_data) +#define CMD_ID_SET_MARGIN \ + _IOWR(RTG_SCHED_IPC_MAGIC, SET_MARGIN, struct proc_state_data) +#define CMD_ID_LIST_RTG \ + _IOWR(RTG_SCHED_IPC_MAGIC, LIST_RTG, struct rtg_info) +#define CMD_ID_LIST_RTG_THREAD \ + _IOWR(RTG_SCHED_IPC_MAGIC, LIST_RTG_THREAD, struct rtg_grp_data) +#define CMD_ID_SEARCH_RTG \ + _IOWR(RTG_SCHED_IPC_MAGIC, SEARCH_RTG, struct proc_state_data) +#define CMD_ID_GET_ENABLE \ + _IOWR(RTG_SCHED_IPC_MAGIC, GET_ENABLE, struct rtg_enable_data) + +enum ioctl_abi_format { + IOCTL_ABI_ARM32, + IOCTL_ABI_AARCH64, +}; + +enum rtg_sched_cmdid { + SET_ENABLE = 1, + SET_RTG, + SET_CONFIG, + SET_RTG_ATTR, + BEGIN_FRAME_FREQ = 5, + END_FRAME_FREQ, + END_SCENE, + SET_MIN_UTIL, + SET_MARGIN, + LIST_RTG = 10, + LIST_RTG_THREAD, + SEARCH_RTG, + GET_ENABLE, + RTG_CTRL_MAX_NR, +}; + +/* proc_state */ +enum grp_ctrl_cmd { + CMD_CREATE_RTG_GRP, + CMD_ADD_RTG_THREAD, + CMD_REMOVE_RTG_THREAD, + CMD_CLEAR_RTG_GRP, + CMD_DESTROY_RTG_GRP +}; + +struct rtg_enable_data { + int enable; + int len; + char *data; +}; + +struct rtg_str_data { + int type; + int len; + char *data; +}; + +struct proc_state_data { + int grp_id; + int state_param; +}; +#endif diff --git a/include/trace/events/rtg.h b/include/trace/events/rtg.h index 12422d2c3ee2dcc5b38fb76da68ed942f7ca8292..b885e328de125ca4886968084611e4dc8282855e 100644 --- a/include/trace/events/rtg.h +++ b/include/trace/events/rtg.h @@ -7,6 +7,7 @@ #include #include +#include struct rq; @@ -111,6 +112,33 @@ TRACE_EVENT(sched_rtg_valid_normalized_util, __entry->id, __entry->nr_running, __get_bitmask(cpus), __entry->valid) ); + +#ifdef CONFIG_SCHED_RTG_FRAME +TRACE_EVENT(rtg_frame_sched, + + TP_PROTO(int rtgid, const char *s, s64 value), + + TP_ARGS(rtgid, s, value), + TP_STRUCT__entry( + __field(int, rtgid) + __field(struct frame_info *, frame) + __field(pid_t, pid) + __string(str, s) + __field(s64, value) + ), + + TP_fast_assign( + __assign_str(str, s); + __entry->rtgid = rtgid != -1 ? rtgid : (current->grp ? current->grp->id : 0); + __entry->frame = rtg_frame_info(rtgid); + __entry->pid = __entry->frame ? ((__entry->frame->thread[0]) ? 
+			((__entry->frame->thread[0])->pid) :
+			current->tgid) : current->tgid;
+		__entry->value = value;
+	),
+	TP_printk("C|%d|%s_%d|%lld", __entry->pid, __get_str(str), __entry->rtgid, __entry->value)
+);
+#endif
 #endif /* _TRACE_RTG_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 742ed2fe50deea8a0495070154ed0acc029a1771..5fbf2207c0b26dcad5a9db33bed0ba7746a7c068 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -456,6 +456,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	bool force_update = false;
 
 #ifdef CONFIG_SCHED_RTG
+	unsigned long irq_flag;
+
 	force_update = flags & SCHED_CPUFREQ_FORCE_UPDATE;
 #endif
 
@@ -490,9 +492,17 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (sg_policy->policy->fast_switch_enabled) {
 		sugov_fast_switch(sg_policy, time, next_f);
 	} else {
+#ifdef CONFIG_SCHED_RTG
+		raw_spin_lock_irqsave(&sg_policy->update_lock, irq_flag);
+#else
 		raw_spin_lock(&sg_policy->update_lock);
+#endif
 		sugov_deferred_update(sg_policy, time, next_f);
+#ifdef CONFIG_SCHED_RTG
+		raw_spin_unlock_irqrestore(&sg_policy->update_lock, irq_flag);
+#else
 		raw_spin_unlock(&sg_policy->update_lock);
+#endif
 	}
 }
 
@@ -532,11 +542,16 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	unsigned int next_f;
 	bool force_update = false;
+#ifdef CONFIG_SCHED_RTG
+	unsigned long irq_flag;
+#endif
 
 #ifdef CONFIG_SCHED_RTG
 	force_update = flags & SCHED_CPUFREQ_FORCE_UPDATE;
-#endif
 
+	raw_spin_lock_irqsave(&sg_policy->update_lock, irq_flag);
+#else
 	raw_spin_lock(&sg_policy->update_lock);
+#endif
 
 	sugov_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
@@ -557,7 +572,11 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 		sugov_deferred_update(sg_policy, time, next_f);
 	}
 
+#ifdef CONFIG_SCHED_RTG
+	raw_spin_unlock_irqrestore(&sg_policy->update_lock, irq_flag);
+#else
 	raw_spin_unlock(&sg_policy->update_lock);
+#endif
 }
 
 static void sugov_work(struct kthread_work *work)
diff --git a/kernel/sched/rtg/Kconfig b/kernel/sched/rtg/Kconfig
index 3e5acad17ac5bccad235f2eb7678f0effeb6b029..1cb0c4298b097e03c6860e37eea3bde664fea260 100644
--- a/kernel/sched/rtg/Kconfig
+++ b/kernel/sched/rtg/Kconfig
@@ -22,4 +22,19 @@ config SCHED_RTG_CGROUP
 	  If set, support for adding the tasks which belong to
 	  co-located cgroup to DEFAULT_CGROUP_COLOC RTG.
 
+config SCHED_RTG_FRAME
+	bool "Frame-based Related Thread Group"
+	depends on SCHED_RTG
+	default n
+	help
+	  Support frame-based related thread group scheduling.
+	  If set, tasks can be attached to a frame RTG and the
+	  kernel will track their load on a per-frame basis.
+
+config SCHED_RTG_RT_THREAD_LIMIT
+	bool "Limit the number of RT threads in groups"
+	depends on SCHED_RTG_FRAME
+	default n
+	help
+	  If set, limit the number of RT threads in frame RTGs.
endmenu diff --git a/kernel/sched/rtg/Makefile b/kernel/sched/rtg/Makefile index a911575b0734c5db8a7d4628686de4aede34c58d..4d55523d1f32b8acb0404b943de3cb407d7b3832 100644 --- a/kernel/sched/rtg/Makefile +++ b/kernel/sched/rtg/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_SCHED_RTG) += rtg.o +obj-$(CONFIG_SCHED_RTG_FRAME) += frame_rtg.o rtg_ctrl.o diff --git a/kernel/sched/rtg/frame_rtg.c b/kernel/sched/rtg/frame_rtg.c new file mode 100644 index 0000000000000000000000000000000000000000..89561c84774e726ad7bb61216dabdec5f4291779 --- /dev/null +++ b/kernel/sched/rtg/frame_rtg.c @@ -0,0 +1,1221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Frame-based load tracking for rt_frame and RTG + * + * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd. + */ + +#include "frame_rtg.h" +#include "rtg.h" + +#include +#include +#include <../kernel/sched/sched.h> +#include + +static struct multi_frame_id_manager g_id_manager = { + .id_map = {0}, + .offset = 0, + .lock = __RW_LOCK_UNLOCKED(g_id_manager.lock) +}; + +static struct frame_info g_multi_frame_info[MULTI_FRAME_NUM]; + +static bool is_rtg_rt_task(struct task_struct *task) +{ + bool ret = false; + + if (!task) + return ret; + + ret = ((task->prio < MAX_RT_PRIO) && + (task->rtg_depth == STATIC_RTG_DEPTH)); + + return ret; +} + +#ifdef CONFIG_SCHED_RTG_RT_THREAD_LIMIT +static atomic_t g_rtg_rt_thread_num = ATOMIC_INIT(0); + +static unsigned int _get_rtg_rt_thread_num(struct related_thread_group *grp) +{ + unsigned int rtg_rt_thread_num = 0; + struct task_struct *p = NULL; + + if (list_empty(&grp->tasks)) + goto out; + + list_for_each_entry(p, &grp->tasks, grp_list) { + if (is_rtg_rt_task(p)) + ++rtg_rt_thread_num; + } + +out: + return rtg_rt_thread_num; +} + +static unsigned int get_rtg_rt_thread_num(void) +{ + struct related_thread_group *grp = NULL; + unsigned int total_rtg_rt_thread_num = 0; + unsigned long flag; + unsigned int i; + + for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) { + grp = lookup_related_thread_group(i); + if (grp == NULL) + continue; + raw_spin_lock_irqsave(&grp->lock, flag); + total_rtg_rt_thread_num += _get_rtg_rt_thread_num(grp); + raw_spin_unlock_irqrestore(&grp->lock, flag); + } + + return total_rtg_rt_thread_num; +} + +static void inc_rtg_rt_thread_num(void) +{ + atomic_inc(&g_rtg_rt_thread_num); +} + +static void dec_rtg_rt_thread_num(void) +{ + atomic_dec_if_positive(&g_rtg_rt_thread_num); +} + +static int test_and_read_rtg_rt_thread_num(void) +{ + if (atomic_read(&g_rtg_rt_thread_num) >= RTG_MAX_RT_THREAD_NUM) + atomic_set(&g_rtg_rt_thread_num, get_rtg_rt_thread_num()); + + return atomic_read(&g_rtg_rt_thread_num); +} + +int read_rtg_rt_thread_num(void) +{ + return atomic_read(&g_rtg_rt_thread_num); +} +#else +static inline void inc_rtg_rt_thread_num(void) { } +static inline void dec_rtg_rt_thread_num(void) { } +static inline int test_and_read_rtg_rt_thread_num(void) +{ + return 0; +} +#endif + +bool is_frame_rtg(int id) +{ + return (id >= MULTI_FRAME_ID) && + (id < (MULTI_FRAME_ID + MULTI_FRAME_NUM)); +} + +static struct related_thread_group *frame_rtg(int id) +{ + if (!is_frame_rtg(id)) + return NULL; + + return lookup_related_thread_group(id); +} + +struct frame_info *rtg_frame_info(int id) +{ + if (!is_frame_rtg(id)) + return NULL; + + return rtg_active_multi_frame_info(id); +} + +static int alloc_rtg_id(void) +{ + unsigned int id_offset; + int id; + + write_lock(&g_id_manager.lock); + id_offset = find_next_zero_bit(g_id_manager.id_map, MULTI_FRAME_NUM, + 
g_id_manager.offset);
+	if (id_offset >= MULTI_FRAME_NUM) {
+		id_offset = find_first_zero_bit(g_id_manager.id_map,
+						MULTI_FRAME_NUM);
+		if (id_offset >= MULTI_FRAME_NUM) {
+			write_unlock(&g_id_manager.lock);
+			return -EINVAL;
+		}
+	}
+
+	set_bit(id_offset, g_id_manager.id_map);
+	g_id_manager.offset = id_offset;
+	id = id_offset + MULTI_FRAME_ID;
+	write_unlock(&g_id_manager.lock);
+	pr_debug("[FRAME_RTG] %s id_offset=%u, id=%d\n", __func__, id_offset, id);
+
+	return id;
+}
+
+static void free_rtg_id(int id)
+{
+	unsigned int id_offset = id - MULTI_FRAME_ID;
+
+	if (id_offset >= MULTI_FRAME_NUM) {
+		pr_err("[FRAME_RTG] %s id_offset is invalid, id=%d, id_offset=%u.\n",
+			__func__, id, id_offset);
+		return;
+	}
+
+	pr_debug("[FRAME_RTG] %s id=%d id_offset=%u\n", __func__, id, id_offset);
+	write_lock(&g_id_manager.lock);
+	clear_bit(id_offset, g_id_manager.id_map);
+	write_unlock(&g_id_manager.lock);
+}
+
+int set_frame_rate(struct frame_info *frame_info, int rate)
+{
+	int id;
+
+	if ((rate < MIN_FRAME_RATE) || (rate > MAX_FRAME_RATE)) {
+		pr_err("[FRAME_RTG]: %s invalid QOS(rate) value\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!frame_info || !frame_info->rtg)
+		return -EINVAL;
+
+	frame_info->frame_rate = (unsigned int)rate;
+	frame_info->frame_time = div_u64(NSEC_PER_SEC, rate);
+	frame_info->max_vload_time =
+		div_u64(frame_info->frame_time, NSEC_PER_MSEC) +
+		frame_info->vload_margin;
+	id = frame_info->rtg->id;
+	trace_rtg_frame_sched(id, "FRAME_QOS", rate);
+	trace_rtg_frame_sched(id, "FRAME_MAX_TIME", frame_info->max_vload_time);
+
+	return 0;
+}
+
+int alloc_multi_frame_info(void)
+{
+	struct frame_info *frame_info = NULL;
+	int id;
+
+	id = alloc_rtg_id();
+	if (id < 0)
+		return id;
+
+	frame_info = rtg_frame_info(id);
+	if (!frame_info) {
+		free_rtg_id(id);
+		return -EINVAL;
+	}
+
+	set_frame_rate(frame_info, DEFAULT_FRAME_RATE);
+	atomic_set(&frame_info->curr_rt_thread_num, 0);
+	atomic_set(&frame_info->max_rt_thread_num, DEFAULT_MAX_RT_THREAD);
+
+	return id;
+}
+
+void release_multi_frame_info(int id)
+{
+	if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM)) {
+		pr_err("[FRAME_RTG] %s frame(id=%d) not found.\n", __func__, id);
+		return;
+	}
+
+	read_lock(&g_id_manager.lock);
+	if (!test_bit(id - MULTI_FRAME_ID, g_id_manager.id_map)) {
+		read_unlock(&g_id_manager.lock);
+		return;
+	}
+	read_unlock(&g_id_manager.lock);
+
+	pr_debug("[FRAME_RTG] %s release frame(id=%d).\n", __func__, id);
+	free_rtg_id(id);
+}
+
+void clear_multi_frame_info(void)
+{
+	write_lock(&g_id_manager.lock);
+	bitmap_zero(g_id_manager.id_map, MULTI_FRAME_NUM);
+	g_id_manager.offset = 0;
+	write_unlock(&g_id_manager.lock);
+}
+
+struct frame_info *rtg_active_multi_frame_info(int id)
+{
+	struct frame_info *frame_info = NULL;
+
+	if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM))
+		return NULL;
+
+	read_lock(&g_id_manager.lock);
+	if (test_bit(id - MULTI_FRAME_ID, g_id_manager.id_map))
+		frame_info = &g_multi_frame_info[id - MULTI_FRAME_ID];
+	read_unlock(&g_id_manager.lock);
+	if (!frame_info)
+		pr_debug("[FRAME_RTG] %s frame %d has been released\n",
+			__func__, id);
+
+	return frame_info;
+}
+
+struct frame_info *rtg_multi_frame_info(int id)
+{
+	if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM))
+		return NULL;
+
+	return &g_multi_frame_info[id - MULTI_FRAME_ID];
+}
+
+static void do_update_frame_task_prio(struct frame_info *frame_info,
+				      struct task_struct *task, int prio)
+{
+	int policy = SCHED_NORMAL;
+	struct 
sched_param sp = {0}; + bool is_rt_task = (prio != NOT_RT_PRIO); + bool need_dec_flag = false; + bool need_inc_flag = false; + int err; + + trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num", + read_rtg_rt_thread_num()); + /* change policy to RT */ + if (is_rt_task && (atomic_read(&frame_info->curr_rt_thread_num) < + atomic_read(&frame_info->max_rt_thread_num))) { + /* change policy from CFS to RT */ + if (!is_rtg_rt_task(task)) { + if (test_and_read_rtg_rt_thread_num() >= RTG_MAX_RT_THREAD_NUM) + goto out; + need_inc_flag = true; + } + /* change RT priority */ + policy = SCHED_FIFO | SCHED_RESET_ON_FORK; + sp.sched_priority = MAX_USER_RT_PRIO - 1 - prio; + atomic_inc(&frame_info->curr_rt_thread_num); + } else { + /* change policy from RT to CFS */ + if (!is_rt_task && is_rtg_rt_task(task)) + need_dec_flag = true; + } +out: + trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num", + read_rtg_rt_thread_num()); + trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num", + atomic_read(&frame_info->curr_rt_thread_num)); + err = sched_setscheduler_nocheck(task, policy, &sp); + if (err == 0) { + if (need_dec_flag) + dec_rtg_rt_thread_num(); + else if (need_inc_flag) + inc_rtg_rt_thread_num(); + } +} + +int list_rtg_group(struct rtg_info *rs_data) +{ + int i; + int num = 0; + + read_lock(&g_id_manager.lock); + for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) { + if (test_bit(i - MULTI_FRAME_ID, g_id_manager.id_map)) { + rs_data->rtgs[num] = i; + num++; + } + } + read_unlock(&g_id_manager.lock); + rs_data->rtg_num = num; + + return num; +} + +int search_rtg(int pid) +{ + struct rtg_info grp_info; + struct frame_info *frame_info = NULL; + int i = 0; + int j = 0; + + grp_info.rtg_num = 0; + read_lock(&g_id_manager.lock); + for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) { + if (test_bit(i - MULTI_FRAME_ID, g_id_manager.id_map)) { + grp_info.rtgs[grp_info.rtg_num] = i; + grp_info.rtg_num++; + } + } + read_unlock(&g_id_manager.lock); + for (i = 0; i < grp_info.rtg_num; i++) { + frame_info = lookup_frame_info_by_grp_id(grp_info.rtgs[i]); + if (!frame_info) { + pr_err("[FRAME_RTG] unexpected grp %d find error.", i); + return -EINVAL; + } + + for (j = 0; j < frame_info->thread_num; j++) { + if (frame_info->thread[j] && frame_info->thread[j]->pid == pid) + return grp_info.rtgs[i]; + } + } + + return 0; +} + +static void update_frame_task_prio(struct frame_info *frame_info, int prio) +{ + int i; + struct task_struct *thread = NULL; + + /* reset curr_rt_thread_num */ + atomic_set(&frame_info->curr_rt_thread_num, 0); + + for (i = 0; i < MAX_TID_NUM; i++) { + thread = frame_info->thread[i]; + if (thread) + do_update_frame_task_prio(frame_info, thread, prio); + } +} + +void set_frame_prio(struct frame_info *frame_info, int prio) +{ + if (!frame_info) + return; + + write_lock(&frame_info->lock); + if (frame_info->prio == prio) + goto out; + + update_frame_task_prio(frame_info, prio); + frame_info->prio = prio; +out: + write_unlock(&frame_info->lock); +} + +static int do_set_rtg_sched(struct task_struct *task, bool is_rtg, + int grp_id, int prio) +{ + int err; + int policy = SCHED_NORMAL; + int grpid = DEFAULT_RTG_GRP_ID; + bool is_rt_task = (prio != NOT_RT_PRIO); + struct sched_param sp = {0}; + + if (is_rtg) { + if (is_rt_task) { + if (test_and_read_rtg_rt_thread_num() >= RTG_MAX_RT_THREAD_NUM) + // rtg_rt_thread_num is inavailable, set policy to CFS + goto skip_setpolicy; + policy = SCHED_FIFO | SCHED_RESET_ON_FORK; + sp.sched_priority = 
MAX_USER_RT_PRIO - 1 - prio; + } +skip_setpolicy: + grpid = grp_id; + } + err = sched_setscheduler_nocheck(task, policy, &sp); + if (err < 0) { + pr_err("[FRAME_RTG]: %s task:%d setscheduler err:%d\n", + __func__, task->pid, err); + return err; + } + err = sched_set_group_id(task, grpid); + if (err < 0) { + pr_err("[FRAME_RTG]: %s task:%d set_group_id err:%d\n", + __func__, task->pid, err); + if (is_rtg) { + policy = SCHED_NORMAL; + sp.sched_priority = 0; + sched_setscheduler_nocheck(task, policy, &sp); + } + } + if (err == 0) { + if (is_rtg) { + if (policy != SCHED_NORMAL) + inc_rtg_rt_thread_num(); + } else { + dec_rtg_rt_thread_num(); + } + } + + return err; +} + +static int set_rtg_sched(struct task_struct *task, bool is_rtg, + int grp_id, int prio) +{ + int err = -1; + bool is_rt_task = (prio != NOT_RT_PRIO); + + if (!task) + return err; + + if (is_rt_task && is_rtg && ((prio < 0) || + (prio > MAX_USER_RT_PRIO - 1))) + return err; + /* + * if CONFIG_HW_FUTEX_PI is set, task->prio and task->sched_class + * may be modified by rtmutex. So we use task->policy instead. + */ + if (is_rtg && (!fair_policy(task->policy) || (task->flags & PF_EXITING))) + return err; + + if (in_interrupt()) { + pr_err("[FRAME_RTG]: %s is in interrupt\n", __func__); + return err; + } + + return do_set_rtg_sched(task, is_rtg, grp_id, prio); +} + +static bool set_frame_rtg_thread(int grp_id, struct task_struct *task, + bool is_rtg, int prio) +{ + int depth; + + if (!task) + return false; + depth = task->rtg_depth; + if (is_rtg) + task->rtg_depth = STATIC_RTG_DEPTH; + else + task->rtg_depth = 0; + + if (set_rtg_sched(task, is_rtg, grp_id, prio) < 0) { + task->rtg_depth = depth; + return false; + } + + return true; +} + +struct task_struct *update_frame_thread(struct frame_info *frame_info, + int old_prio, int prio, int pid, + struct task_struct *old_task) +{ + struct task_struct *task = NULL; + bool is_rt_task = (prio != NOT_RT_PRIO); + int new_prio = prio; + bool update_ret = false; + + if (pid > 0) { + if (old_task && (pid == old_task->pid) && (old_prio == new_prio)) { + if (is_rt_task && atomic_read(&frame_info->curr_rt_thread_num) < + atomic_read(&frame_info->max_rt_thread_num) && + (atomic_read(&frame_info->frame_sched_state) == 1)) + atomic_inc(&frame_info->curr_rt_thread_num); + trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num", + atomic_read(&frame_info->curr_rt_thread_num)); + return old_task; + } + rcu_read_lock(); + task = find_task_by_vpid(pid); + if (task) + get_task_struct(task); + rcu_read_unlock(); + } + trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", + atomic_read(&frame_info->frame_sched_state)); + if (atomic_read(&frame_info->frame_sched_state) == 1) { + if (task && is_rt_task) { + if (atomic_read(&frame_info->curr_rt_thread_num) < + atomic_read(&frame_info->max_rt_thread_num)) + atomic_inc(&frame_info->curr_rt_thread_num); + else + new_prio = NOT_RT_PRIO; + } + trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num", + atomic_read(&frame_info->curr_rt_thread_num)); + trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num", + read_rtg_rt_thread_num()); + + set_frame_rtg_thread(frame_info->rtg->id, old_task, false, NOT_RT_PRIO); + update_ret = set_frame_rtg_thread(frame_info->rtg->id, task, true, new_prio); + } + if (old_task) + put_task_struct(old_task); + if (!update_ret) + return NULL; + + return task; +} + +void update_frame_thread_info(struct frame_info *frame_info, + struct frame_thread_info *frame_thread_info) +{ + int i; + int old_prio; + int 
prio; + int thread_num; + int real_thread; + + if (!frame_info || !frame_thread_info || + frame_thread_info->thread_num < 0) + return; + + prio = frame_thread_info->prio; + thread_num = frame_thread_info->thread_num; + if (thread_num > MAX_TID_NUM) + thread_num = MAX_TID_NUM; + + // reset curr_rt_thread_num + atomic_set(&frame_info->curr_rt_thread_num, 0); + write_lock(&frame_info->lock); + old_prio = frame_info->prio; + real_thread = 0; + for (i = 0; i < thread_num; i++) { + frame_info->thread[i] = update_frame_thread(frame_info, old_prio, prio, + frame_thread_info->thread[i], + frame_info->thread[i]); + if (frame_info->thread[i] && (frame_thread_info->thread[i] > 0)) + real_thread++; + } + frame_info->prio = prio; + frame_info->thread_num = real_thread; + write_unlock(&frame_info->lock); +} + +static void do_set_frame_sched_state(struct frame_info *frame_info, + struct task_struct *task, + bool enable, int prio) +{ + int new_prio = prio; + bool is_rt_task = (prio != NOT_RT_PRIO); + + if (enable && is_rt_task) { + if (atomic_read(&frame_info->curr_rt_thread_num) < + atomic_read(&frame_info->max_rt_thread_num)) + atomic_inc(&frame_info->curr_rt_thread_num); + else + new_prio = NOT_RT_PRIO; + } + trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num", + atomic_read(&frame_info->curr_rt_thread_num)); + trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num", + read_rtg_rt_thread_num()); + set_frame_rtg_thread(frame_info->rtg->id, task, enable, new_prio); +} + +void set_frame_sched_state(struct frame_info *frame_info, bool enable) +{ + atomic_t *frame_sched_state = NULL; + int prio; + int i; + + if (!frame_info || !frame_info->rtg) + return; + + frame_sched_state = &(frame_info->frame_sched_state); + if (enable) { + if (atomic_read(frame_sched_state) == 1) + return; + atomic_set(frame_sched_state, 1); + trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 1); + + frame_info->prev_fake_load_util = 0; + frame_info->prev_frame_load_util = 0; + frame_info->frame_vload = 0; + frame_info_rtg_load(frame_info)->curr_window_load = 0; + } else { + if (atomic_read(frame_sched_state) == 0) + return; + atomic_set(frame_sched_state, 0); + trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 0); + + (void)sched_set_group_normalized_util(frame_info->rtg->id, + 0, RTG_FREQ_NORMAL_UPDATE); + trace_rtg_frame_sched(frame_info->rtg->id, "preferred_cluster", + INVALID_PREFERRED_CLUSTER); + frame_info->status = FRAME_END; + } + + /* reset curr_rt_thread_num */ + atomic_set(&frame_info->curr_rt_thread_num, 0); + write_lock(&frame_info->lock); + prio = frame_info->prio; + for (i = 0; i < MAX_TID_NUM; i++) { + if (frame_info->thread[i]) + do_set_frame_sched_state(frame_info, frame_info->thread[i], + enable, prio); + } + write_unlock(&frame_info->lock); + + trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_STATUS", + frame_info->status); + trace_rtg_frame_sched(frame_info->rtg->id, "frame_status", + frame_info->status); +} + +static inline bool check_frame_util_invalid(const struct frame_info *frame_info, + u64 timeline) +{ + return ((frame_info_rtg(frame_info)->util_invalid_interval <= timeline) && + (frame_info_rtg_load(frame_info)->curr_window_exec * FRAME_UTIL_INVALID_FACTOR + <= timeline)); +} + +static u64 calc_prev_fake_load_util(const struct frame_info *frame_info) +{ + u64 prev_frame_load = frame_info->prev_frame_load; + u64 prev_frame_time = max_t(unsigned long, frame_info->prev_frame_time, + frame_info->frame_time); + u64 frame_util = 0; + + if (prev_frame_time > 0) + 
frame_util = div_u64((prev_frame_load << SCHED_CAPACITY_SHIFT), + prev_frame_time); + frame_util = clamp_t(unsigned long, frame_util, + frame_info->prev_min_util, + frame_info->prev_max_util); + + return frame_util; +} + +static u64 calc_prev_frame_load_util(const struct frame_info *frame_info) +{ + u64 prev_frame_load = frame_info->prev_frame_load; + u64 frame_time = frame_info->frame_time; + u64 frame_util = 0; + + if (prev_frame_load >= frame_time) + frame_util = FRAME_MAX_LOAD; + else + frame_util = div_u64((prev_frame_load << SCHED_CAPACITY_SHIFT), + frame_info->frame_time); + frame_util = clamp_t(unsigned long, frame_util, + frame_info->prev_min_util, + frame_info->prev_max_util); + + return frame_util; +} + +/* last frame load tracking */ +static void update_frame_prev_load(struct frame_info *frame_info, bool fake) +{ + /* last frame load tracking */ + frame_info->prev_frame_exec = + frame_info_rtg_load(frame_info)->prev_window_exec; + frame_info->prev_frame_time = + frame_info_rtg(frame_info)->prev_window_time; + frame_info->prev_frame_load = + frame_info_rtg_load(frame_info)->prev_window_load; + + if (fake) + frame_info->prev_fake_load_util = + calc_prev_fake_load_util(frame_info); + else + frame_info->prev_frame_load_util = + calc_prev_frame_load_util(frame_info); +} + +static void do_frame_end(struct frame_info *frame_info, bool fake) +{ + unsigned long prev_util; + int id = frame_info->rtg->id; + + frame_info->status = FRAME_END; + trace_rtg_frame_sched(id, "frame_status", frame_info->status); + + /* last frame load tracking */ + update_frame_prev_load(frame_info, fake); + + /* reset frame_info */ + frame_info->frame_vload = 0; + + /* reset frame_min_util */ + frame_info->frame_min_util = 0; + + if (fake) + prev_util = frame_info->prev_fake_load_util; + else + prev_util = frame_info->prev_frame_load_util; + + frame_info->frame_util = clamp_t(unsigned long, prev_util, + frame_info->frame_min_util, + frame_info->frame_max_util); + + trace_rtg_frame_sched(id, "frame_last_task_time", + frame_info->prev_frame_exec); + trace_rtg_frame_sched(id, "frame_last_time", frame_info->prev_frame_time); + trace_rtg_frame_sched(id, "frame_last_load", frame_info->prev_frame_load); + trace_rtg_frame_sched(id, "frame_last_load_util", + frame_info->prev_frame_load_util); + trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util); + trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload); +} + +/* + * frame_load : calculate frame load using exec util + */ +static inline u64 calc_frame_exec(const struct frame_info *frame_info) +{ + if (frame_info->frame_time > 0) + return div_u64((frame_info_rtg_load(frame_info)->curr_window_exec << + SCHED_CAPACITY_SHIFT), frame_info->frame_time); + else + return 0; +} + +/* + * real_util: + * max(last_util, virtual_util, boost_util, phase_util, frame_min_util) + */ +static u64 calc_frame_util(const struct frame_info *frame_info, bool fake) +{ + unsigned long load_util; + + if (fake) + load_util = frame_info->prev_fake_load_util; + else + load_util = frame_info->prev_frame_load_util; + + load_util = max_t(unsigned long, load_util, frame_info->frame_vload); + load_util = clamp_t(unsigned long, load_util, + frame_info->frame_min_util, + frame_info->frame_max_util); + + return load_util; +} + +/* + * frame_vload [0~1024] + * vtime: now - timestamp + * max_time: frame_info->frame_time + vload_margin + * load = F(vtime) + * = vtime ^ 2 - vtime * max_time + FRAME_MAX_VLOAD * vtime / max_time; + * = vtime * (vtime + FRAME_MAX_VLOAD / max_time - max_time); + 
* [0, 0] -=> [max_time, FRAME_MAX_VLOAD] + * + */ +static u64 calc_frame_vload(const struct frame_info *frame_info, u64 timeline) +{ + u64 vload; + int vtime = div_u64(timeline, NSEC_PER_MSEC); + int max_time = frame_info->max_vload_time; + int factor; + + if ((max_time <= 0) || (vtime > max_time)) + return FRAME_MAX_VLOAD; + + factor = vtime + FRAME_MAX_VLOAD / max_time; + /* margin maybe negative */ + if ((vtime <= 0) || (factor <= max_time)) + return 0; + + vload = (u64)vtime * (u64)(factor - max_time); + + return vload; +} + +static int update_frame_info_tick_inner(int id, struct frame_info *frame_info, + u64 timeline) +{ + switch (frame_info->status) { + case FRAME_INVALID: + case FRAME_END: + if (timeline >= frame_info->frame_time) { + /* + * fake FRAME_END here to rollover frame_window. + */ + sched_set_group_window_rollover(id); + do_frame_end(frame_info, true); + } else { + frame_info->frame_vload = calc_frame_exec(frame_info); + frame_info->frame_util = + calc_frame_util(frame_info, true); + } + + /* when not in boost, start tick timer */ + break; + case FRAME_START: + /* check frame_util invalid */ + if (!check_frame_util_invalid(frame_info, timeline)) { + /* frame_vload statistic */ + frame_info->frame_vload = calc_frame_vload(frame_info, timeline); + /* frame_util statistic */ + frame_info->frame_util = + calc_frame_util(frame_info, false); + } else { + frame_info->status = FRAME_INVALID; + trace_rtg_frame_sched(id, "FRAME_STATUS", + frame_info->status); + trace_rtg_frame_sched(id, "frame_status", + frame_info->status); + + /* + * trigger FRAME_END to rollover frame_window, + * we treat FRAME_INVALID as FRAME_END. + */ + sched_set_group_window_rollover(id); + do_frame_end(frame_info, false); + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static inline struct frame_info *rtg_frame_info_inner( + const struct related_thread_group *grp) +{ + return (struct frame_info *)grp->private_data; +} + +static inline void frame_boost(struct frame_info *frame_info) +{ + if (frame_info->frame_util < frame_info->frame_boost_min_util) + frame_info->frame_util = frame_info->frame_boost_min_util; +} + +/* + * update CPUFREQ and PLACEMENT when frame task running (in tick) and migration + */ +static void update_frame_info_tick(struct related_thread_group *grp) +{ + u64 window_start; + u64 wallclock; + u64 timeline; + struct frame_info *frame_info = NULL; + int id = grp->id; + + rcu_read_lock(); + frame_info = rtg_frame_info_inner(grp); + window_start = grp->window_start; + rcu_read_unlock(); + if (unlikely(!frame_info)) + return; + + if (atomic_read(&frame_info->frame_sched_state) == 0) + return; + trace_rtg_frame_sched(id, "frame_status", frame_info->status); + + wallclock = ktime_get_ns(); + timeline = wallclock - window_start; + + trace_rtg_frame_sched(id, "update_curr_pid", current->pid); + trace_rtg_frame_sched(id, "frame_timeline", div_u64(timeline, NSEC_PER_MSEC)); + + if (update_frame_info_tick_inner(grp->id, frame_info, timeline) == -EINVAL) + return; + + frame_boost(frame_info); + trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload); + trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util); + + sched_set_group_normalized_util(grp->id, + frame_info->frame_util, RTG_FREQ_NORMAL_UPDATE); + + if (grp->preferred_cluster) + trace_rtg_frame_sched(id, "preferred_cluster", + grp->preferred_cluster->id); +} + +const struct rtg_class frame_rtg_class = { + .sched_update_rtg_tick = update_frame_info_tick, +}; + +int set_frame_margin(struct frame_info 
*frame_info, int margin) +{ + int id; + + if ((margin < MIN_VLOAD_MARGIN) || (margin > MAX_VLOAD_MARGIN)) { + pr_err("[FRAME_RTG]: %s invalid MARGIN value\n", + __func__); + return -EINVAL; + } + + if (!frame_info || !frame_info->rtg) + return -EINVAL; + + frame_info->vload_margin = margin; + frame_info->max_vload_time = + div_u64(frame_info->frame_time, NSEC_PER_MSEC) + + frame_info->vload_margin; + id = frame_info->rtg->id; + trace_rtg_frame_sched(id, "FRAME_MARGIN", margin); + trace_rtg_frame_sched(id, "FRAME_MAX_TIME", frame_info->max_vload_time); + + return 0; +} + +static void set_frame_start(struct frame_info *frame_info) +{ + int id = frame_info->rtg->id; + + if (likely(frame_info->status == FRAME_START)) { + /* + * START -=> START -=> ...... + * FRMAE_START is + * the end of last frame + * the start of the current frame + */ + update_frame_prev_load(frame_info, false); + } else if ((frame_info->status == FRAME_END) || + (frame_info->status == FRAME_INVALID)) { + /* START -=> END -=> [START] + * FRAME_START is + * only the start of current frame + * we shoudn't tracking the last rtg-window + * [FRAME_END, FRAME_START] + * it's not an available frame window + */ + update_frame_prev_load(frame_info, true); + frame_info->status = FRAME_START; + } + trace_rtg_frame_sched(id, "FRAME_STATUS", frame_info->status); + trace_rtg_frame_sched(id, "frame_last_task_time", + frame_info->prev_frame_exec); + trace_rtg_frame_sched(id, "frame_last_time", frame_info->prev_frame_time); + trace_rtg_frame_sched(id, "frame_last_load", frame_info->prev_frame_load); + trace_rtg_frame_sched(id, "frame_last_load_util", + frame_info->prev_frame_load_util); + + /* new_frame_start */ + if (!frame_info->margin_imme) { + frame_info->frame_vload = 0; + frame_info->frame_util = clamp_t(unsigned long, + frame_info->prev_frame_load_util, + frame_info->frame_min_util, + frame_info->frame_max_util); + } else { + frame_info->frame_vload = calc_frame_vload(frame_info, 0); + frame_info->frame_util = calc_frame_util(frame_info, false); + } + + trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload); +} + +static void set_frame_end(struct frame_info *frame_info) +{ + trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_STATUS", FRAME_END); + do_frame_end(frame_info, false); +} + +static int update_frame_timestamp(unsigned long status, + struct frame_info *frame_info, struct related_thread_group *grp) +{ + int id = frame_info->rtg->id; + + /* SCHED_FRAME timestamp */ + switch (status) { + case FRAME_START: + /* collect frame_info when frame_end timestamp coming */ + set_frame_start(frame_info); + break; + case FRAME_END: + /* FRAME_END should only set and update freq once */ + if (unlikely(frame_info->status == FRAME_END)) + return 0; + set_frame_end(frame_info); + break; + default: + pr_err("[FRAME_RTG]: %s invalid timestamp(status)\n", + __func__); + return -EINVAL; + } + + frame_boost(frame_info); + trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util); + + /* update cpufreq force when frame_stop */ + sched_set_group_normalized_util(grp->id, + frame_info->frame_util, RTG_FREQ_FORCE_UPDATE); + if (grp->preferred_cluster) + trace_rtg_frame_sched(id, "preferred_cluster", + grp->preferred_cluster->id); + + return 0; +} + +static int set_frame_status(struct frame_info *frame_info, unsigned long status) +{ + struct related_thread_group *grp = NULL; + int id; + + if (!frame_info) + return -EINVAL; + + grp = frame_info->rtg; + if (unlikely(!grp)) + return -EINVAL; + + if 
(atomic_read(&frame_info->frame_sched_state) == 0) + return -EINVAL; + + if (!(status & FRAME_SETTIME) || + (status == (unsigned long)FRAME_SETTIME_PARAM)) { + pr_err("[FRAME_RTG]: %s invalid timetsamp(status)\n", + __func__); + return -EINVAL; + } + + if (status & FRAME_TIMESTAMP_SKIP_START) { + frame_info->timestamp_skipped = true; + status &= ~FRAME_TIMESTAMP_SKIP_START; + } else if (status & FRAME_TIMESTAMP_SKIP_END) { + frame_info->timestamp_skipped = false; + status &= ~FRAME_TIMESTAMP_SKIP_END; + } else if (frame_info->timestamp_skipped) { + /* + * skip the following timestamp until + * FRAME_TIMESTAMP_SKIPPED reset + */ + return 0; + } + id = grp->id; + trace_rtg_frame_sched(id, "FRAME_TIMESTAMP_SKIPPED", + frame_info->timestamp_skipped); + trace_rtg_frame_sched(id, "FRAME_MAX_UTIL", frame_info->frame_max_util); + + if (status & FRAME_USE_MARGIN_IMME) { + frame_info->margin_imme = true; + status &= ~FRAME_USE_MARGIN_IMME; + } else { + frame_info->margin_imme = false; + } + trace_rtg_frame_sched(id, "FRAME_MARGIN_IMME", frame_info->margin_imme); + trace_rtg_frame_sched(id, "FRAME_TIMESTAMP", status); + + return update_frame_timestamp(status, frame_info, grp); +} + +int set_frame_timestamp(struct frame_info *frame_info, unsigned long timestamp) +{ + int ret; + + if (!frame_info || !frame_info->rtg) + return -EINVAL; + + if (atomic_read(&frame_info->frame_sched_state) == 0) + return -EINVAL; + + ret = sched_set_group_window_rollover(frame_info->rtg->id); + if (!ret) + ret = set_frame_status(frame_info, timestamp); + + return ret; +} + +int set_frame_min_util(struct frame_info *frame_info, int min_util, bool is_boost) +{ + int id; + + if (unlikely((min_util < 0) || (min_util > SCHED_CAPACITY_SCALE))) { + pr_err("[FRAME_RTG]: %s invalid min_util value\n", + __func__); + return -EINVAL; + } + + if (!frame_info || !frame_info->rtg) + return -EINVAL; + + id = frame_info->rtg->id; + if (is_boost) { + frame_info->frame_boost_min_util = min_util; + trace_rtg_frame_sched(id, "FRAME_BOOST_MIN_UTIL", min_util); + } else { + frame_info->frame_min_util = min_util; + + frame_info->frame_util = calc_frame_util(frame_info, false); + trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util); + sched_set_group_normalized_util(id, + frame_info->frame_util, RTG_FREQ_FORCE_UPDATE); + } + + return 0; +} + +int set_frame_max_util(struct frame_info *frame_info, int max_util) +{ + int id; + + if ((max_util < 0) || (max_util > SCHED_CAPACITY_SCALE)) { + pr_err("[FRAME_RTG]: %s invalid max_util value\n", + __func__); + return -EINVAL; + } + + if (!frame_info || !frame_info->rtg) + return -EINVAL; + + frame_info->frame_max_util = max_util; + id = frame_info->rtg->id; + trace_rtg_frame_sched(id, "FRAME_MAX_UTIL", frame_info->frame_max_util); + + return 0; +} + +struct frame_info *lookup_frame_info_by_grp_id(int grp_id) +{ + if (grp_id >= (MULTI_FRAME_ID + MULTI_FRAME_NUM) || (grp_id <= 0)) + return NULL; + if (grp_id >= MULTI_FRAME_ID) { + read_lock(&g_id_manager.lock); + if (!test_bit(grp_id - MULTI_FRAME_ID, g_id_manager.id_map)) { + read_unlock(&g_id_manager.lock); + return NULL; + } + read_unlock(&g_id_manager.lock); + return rtg_frame_info(grp_id); + } else + return rtg_frame_info(grp_id); +} + +static int _init_frame_info(struct frame_info *frame_info, int id) +{ + struct related_thread_group *grp = NULL; + unsigned long flags; + + memset(frame_info, 0, sizeof(struct frame_info)); + rwlock_init(&frame_info->lock); + + write_lock(&frame_info->lock); + frame_info->frame_rate = DEFAULT_FRAME_RATE; + 
frame_info->frame_time = div_u64(NSEC_PER_SEC, frame_info->frame_rate); + frame_info->thread_num = 0; + frame_info->prio = NOT_RT_PRIO; + atomic_set(&(frame_info->curr_rt_thread_num), 0); + atomic_set(&(frame_info->frame_sched_state), 0); + frame_info->vload_margin = DEFAULT_VLOAD_MARGIN; + frame_info->max_vload_time = + div_u64(frame_info->frame_time, NSEC_PER_MSEC) + + frame_info->vload_margin; + frame_info->frame_min_util = FRAME_DEFAULT_MIN_UTIL; + frame_info->frame_max_util = FRAME_DEFAULT_MAX_UTIL; + frame_info->prev_min_util = FRAME_DEFAULT_MIN_PREV_UTIL; + frame_info->prev_max_util = FRAME_DEFAULT_MAX_PREV_UTIL; + frame_info->margin_imme = false; + frame_info->timestamp_skipped = false; + frame_info->status = FRAME_END; + + grp = frame_rtg(id); + if (unlikely(!grp)) { + write_unlock(&frame_info->lock); + return -EINVAL; + } + + raw_spin_lock_irqsave(&grp->lock, flags); + grp->private_data = frame_info; + grp->rtg_class = &frame_rtg_class; + raw_spin_unlock_irqrestore(&grp->lock, flags); + + frame_info->rtg = grp; + write_unlock(&frame_info->lock); + + return 0; +} + +static int __init init_frame_info(void) +{ + int ret = 0; + int id; + + for (id = MULTI_FRAME_ID; id < (MULTI_FRAME_ID + MULTI_FRAME_NUM); id++) { + if (ret != 0) + break; + ret = _init_frame_info(rtg_multi_frame_info(id), id); + } + + return ret; +} +late_initcall(init_frame_info); diff --git a/kernel/sched/rtg/frame_rtg.h b/kernel/sched/rtg/frame_rtg.h new file mode 100644 index 0000000000000000000000000000000000000000..049bd88652499977b4384129fac6e3c0f3788895 --- /dev/null +++ b/kernel/sched/rtg/frame_rtg.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Frame declaration + * + * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd. + */ + +#ifndef __FRAME_RTG_H +#define __FRAME_RTG_H + +#include +#include +#include +#include + +#define MULTI_FRAME_ID (DEFAULT_CGROUP_COLOC_ID + 1) +#define MULTI_FRAME_NUM (MAX_NUM_CGROUP_COLOC_ID - DEFAULT_CGROUP_COLOC_ID - 1) + +#define NOT_RT_PRIO (-1) +#define STATIC_RTG_DEPTH (-1) + +#define FRAME_START (1 << 0) +#define FRAME_END (1 << 1) +#define FRAME_INVALID (1 << 2) +#define FRAME_USE_MARGIN_IMME (1 << 4) +#define FRAME_TIMESTAMP_SKIP_START (1 << 5) +#define FRAME_TIMESTAMP_SKIP_END (1 << 6) +#define FRAME_SETTIME (FRAME_START | FRAME_END | \ + FRAME_USE_MARGIN_IMME) +#define FRAME_SETTIME_PARAM (-1) + +#define DEFAULT_FRAME_RATE 60 +#define MIN_FRAME_RATE 1 +#define MAX_FRAME_RATE 120 + +/* MARGIN value : [-100, 100] */ +#define DEFAULT_VLOAD_MARGIN 16 +#define MIN_VLOAD_MARGIN (-100) +#define MAX_VLOAD_MARGIN 0xffff + +#define FRAME_MAX_VLOAD SCHED_CAPACITY_SCALE +#define FRAME_MAX_LOAD SCHED_CAPACITY_SCALE +#define FRAME_UTIL_INVALID_FACTOR 4 +#define FRAME_DEFAULT_MIN_UTIL 0 +#define FRAME_DEFAULT_MAX_UTIL SCHED_CAPACITY_SCALE +#define FRAME_DEFAULT_MIN_PREV_UTIL 0 +#define FRAME_DEFAULT_MAX_PREV_UTIL SCHED_CAPACITY_SCALE + +#define DEFAULT_MAX_RT_THREAD 2 +#define RTG_MAX_RT_THREAD_NUM CONFIG_NR_CPUS +#define INVALID_PREFERRED_CLUSTER 10 + +enum rtg_type { + VIP = 0, + TOP_TASK_KEY, + NORMAL_TASK, + RTG_TYPE_MAX, +}; + +struct frame_thread_info { + int prio; + int thread[MAX_TID_NUM]; + int thread_num; +}; + +struct multi_frame_id_manager { + DECLARE_BITMAP(id_map, MULTI_FRAME_NUM); + unsigned int offset; + rwlock_t lock; +}; + +struct rtg_info { + int rtg_num; + int rtgs[MULTI_FRAME_NUM]; +}; + +bool is_frame_rtg(int id); +int set_frame_rate(struct frame_info *frame_info, int rate); +int alloc_multi_frame_info(void); +struct frame_info 
*rtg_active_multi_frame_info(int id); +struct frame_info *rtg_multi_frame_info(int id); +void release_multi_frame_info(int id); +void clear_multi_frame_info(void); +void set_frame_prio(struct frame_info *frame_info, int prio); +struct task_struct *update_frame_thread(struct frame_info *frame_info, + int old_prio, int prio, int pid, + struct task_struct *old_task); +void update_frame_thread_info(struct frame_info *frame_info, + struct frame_thread_info *frame_thread_info); +#ifdef CONFIG_SCHED_RTG_RT_THREAD_LIMIT +int read_rtg_rt_thread_num(void); +#else +static inline int read_rtg_rt_thread_num(void) +{ + return 0; +} +#endif +static inline +struct group_ravg *frame_info_rtg_load(const struct frame_info *frame_info) +{ + return &frame_info_rtg(frame_info)->ravg; +} +void set_frame_sched_state(struct frame_info *frame_info, bool enable); +int set_frame_margin(struct frame_info *frame_info, int margin); +int set_frame_timestamp(struct frame_info *frame_info, unsigned long timestamp); +int set_frame_max_util(struct frame_info *frame_info, int max_util); +int set_frame_min_util(struct frame_info *frame_info, int min_util, bool is_boost); +struct frame_info *lookup_frame_info_by_grp_id(int grp_id); +int list_rtg_group(struct rtg_info *rs_data); +int search_rtg(int pid); +#endif diff --git a/kernel/sched/rtg/rtg.c b/kernel/sched/rtg/rtg.c index dabadd54e59c40a5a74c7158db428750c6d74296..91e2c6abfa4e65e85ecb4f823b0dcb58870d7d79 100644 --- a/kernel/sched/rtg/rtg.c +++ b/kernel/sched/rtg/rtg.c @@ -14,6 +14,10 @@ #include "rtg.h" #include "../walt.h" +#ifdef CONFIG_SCHED_RTG_FRAME +#include "frame_rtg.h" +#endif + #define ADD_TASK 0 #define REM_TASK 1 @@ -1125,6 +1129,10 @@ static void print_rtg_info(struct seq_file *file, grp->util_invalid_interval / NSEC_PER_MSEC); seq_printf_rtg(file, "RTG_CLUSTER : %d\n", grp->preferred_cluster ? grp->preferred_cluster->id : -1); +#ifdef CONFIG_SCHED_RTG_RT_THREAD_LIMIT + seq_printf_rtg(file, "RTG_RT_THREAD_NUM : %d/%d\n", + read_rtg_rt_thread_num(), RTG_MAX_RT_THREAD_NUM); +#endif } static char rtg_task_state_to_char(const struct task_struct *tsk) diff --git a/kernel/sched/rtg/rtg_ctrl.c b/kernel/sched/rtg/rtg_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..0e87dc7162c4bd940567e40e6d2d4560a9104e2e --- /dev/null +++ b/kernel/sched/rtg/rtg_ctrl.c @@ -0,0 +1,1060 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rtg control entry + * + * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd. 
+ */ + +#include "rtg.h" +#include "rtg_ctrl.h" + +#include +#include +#include +#include +#include + +atomic_t g_rtg_enable = ATOMIC_INIT(0); +atomic_t g_enable_type = ATOMIC_INIT(ALL_ENABLE); // default: all enable +static atomic_t g_rt_frame_num = ATOMIC_INIT(0); +static int g_frame_max_util = DEFAULT_MAX_UTIL; +static int g_max_rt_frames = DEFAULT_MAX_RT_FRAME; +typedef long (*rtg_ctrl_func)(int abi, void __user *arg); + +static long ctrl_set_enable(int abi, void __user *uarg); +static long ctrl_set_rtg(int abi, void __user *uarg); +static long ctrl_set_config(int abi, void __user *uarg); +static long ctrl_set_rtg_attr(int abi, void __user *uarg); +static long ctrl_begin_frame(int abi, void __user *uarg); +static long ctrl_end_frame(int abi, void __user *uarg); +static long ctrl_end_scene(int abi, void __user *uarg); +static long ctrl_set_min_util(int abi, void __user *uarg); +static long ctrl_set_margin(int abi, void __user *uarg); +static long ctrl_list_rtg(int abi, void __user *uarg); +static long ctrl_list_rtg_thread(int abi, void __user *uarg); +static long ctrl_search_rtg(int abi, void __user *uarg); +static long ctrl_get_enable(int abi, void __user *uarg); + +static rtg_ctrl_func g_func_array[RTG_CTRL_MAX_NR] = { + NULL, /* reserved */ + ctrl_set_enable, // 1 + ctrl_set_rtg, + ctrl_set_config, + ctrl_set_rtg_attr, + ctrl_begin_frame, // 5 + ctrl_end_frame, + ctrl_end_scene, + ctrl_set_min_util, + ctrl_set_margin, + ctrl_list_rtg, // 10 + ctrl_list_rtg_thread, + ctrl_search_rtg, + ctrl_get_enable +}; + +static int init_proc_state(const int *config, int len); +static void deinit_proc_state(void); + +int get_enable_type(void) +{ + return atomic_read(&g_enable_type); +} + +static int set_enable_config(char *config_str) +{ + char *p = NULL; + char *tmp = NULL; + int value; + int config[RTG_CONFIG_NUM]; + int i; + int ret = 0; + + for (i = 0; i < RTG_CONFIG_NUM; i++) + config[i] = INVALID_VALUE; + /* eg: key1:value1;key2:value2;key3:value3 */ + for (p = strsep(&config_str, ";"); p != NULL; + p = strsep(&config_str, ";")) { + tmp = strsep(&p, ":"); + if ((tmp == NULL) || (p == NULL)) + continue; + if (kstrtoint((const char *)p, DECIMAL, &value)) + return -INVALID_ARG; + + if (!strcmp(tmp, "sched_cycle")) + config[RTG_FREQ_CYCLE] = value; + else if (!strcmp(tmp, "frame_max_util")) + config[RTG_FRAME_MAX_UTIL] = value; + else if (!strcmp(tmp, "invalid_interval")) + config[RTG_INVALID_INTERVAL] = value; + else if (!strcmp(tmp, "enable_type")) + atomic_set(&g_enable_type, value); + else + continue; + } + + for (i = 0; i < RTG_CONFIG_NUM; i++) + pr_info("[SCHED_RTG] config[%d] = %d\n", i, config[i]); + + ret = init_proc_state(config, RTG_CONFIG_NUM); + + return ret; +} + +static void rtg_enable(int abi, const struct rtg_enable_data *data) +{ + char temp[MAX_DATA_LEN]; + int ret = -1; + + if (atomic_read(&g_rtg_enable) == 1) { + pr_info("[SCHED_RTG] already enabled!\n"); + return; + } + + if ((data->len <= 0) || (data->len >= MAX_DATA_LEN)) { + pr_err("[SCHED_RTG] %s data len invalid\n", __func__); + return; + } + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpointer-to-int-cast" + switch (abi) { + case IOCTL_ABI_ARM32: + ret = copy_from_user(&temp, + (void __user *)compat_ptr((compat_uptr_t)data->data), data->len); + break; + case IOCTL_ABI_AARCH64: + ret = copy_from_user(&temp, (void __user *)data->data, data->len); + break; + default: + pr_err("[SCHED_RTG] abi format error\n"); + break; + } + if (ret) { + pr_err("[SCHED_RTG] %s copy user data failed\n", __func__); + 
		return;
+	}
+#pragma GCC diagnostic pop
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
+	temp[data->len] = '\0';
+
+	if (set_enable_config(&temp) != SUCC) {
+		pr_err("[SCHED_RTG] %s failed!\n", __func__);
+		return;
+	}
+#pragma GCC diagnostic pop
+
+	atomic_set(&g_rtg_enable, 1);
+	pr_info("[SCHED_RTG] enabled!\n");
+}
+
+static void rtg_disable(void)
+{
+	if (atomic_read(&g_rtg_enable) == 0) {
+		pr_info("[SCHED_RTG] already disabled!\n");
+		return;
+	}
+	pr_info("[SCHED_RTG] disabled!\n");
+	atomic_set(&g_rtg_enable, 0);
+	deinit_proc_state();
+}
+
+static inline bool is_rt_type(int type)
+{
+	return (type >= VIP && type < NORMAL_TASK);
+}
+
+static int do_update_rt_frame_num(struct frame_info *frame_info, int new_type)
+{
+	int old_type;
+	int ret = SUCC;
+
+	read_lock(&frame_info->lock);
+	old_type = frame_info->prio - DEFAULT_RT_PRIO;
+	if (is_rt_type(new_type) == is_rt_type(old_type))
+		goto out;
+
+	if (is_rt_type(old_type)) {
+		if (atomic_read(&g_rt_frame_num) > 0)
+			atomic_dec(&g_rt_frame_num);
+	} else if (is_rt_type(new_type)) {
+		if (atomic_read(&g_rt_frame_num) < g_max_rt_frames) {
+			atomic_inc(&g_rt_frame_num);
+		} else {
+			pr_err("[SCHED_RTG]: %s g_max_rt_frames is %d\n",
+				__func__, g_max_rt_frames);
+			ret = -INVALID_ARG;
+		}
+	}
+out:
+	read_unlock(&frame_info->lock);
+
+	return ret;
+}
+
+static int update_rt_frame_num(struct frame_info *frame_info, int new_type, int cmd)
+{
+	int ret = SUCC;
+
+	switch (cmd) {
+	case UPDATE_RTG_FRAME:
+		ret = do_update_rt_frame_num(frame_info, new_type);
+		break;
+	case ADD_RTG_FRAME:
+		if (is_rt_type(new_type)) {
+			if (atomic_read(&g_rt_frame_num) >= g_max_rt_frames) {
+				pr_err("[SCHED_RTG] g_max_rt_frames is %d!\n", g_max_rt_frames);
+				ret = -INVALID_ARG;
+			} else {
+				atomic_inc(&g_rt_frame_num);
+			}
+		}
+		break;
+	case CLEAR_RTG_FRAME:
+		if ((atomic_read(&g_rt_frame_num) > 0) && is_rt_type(new_type))
+			atomic_dec(&g_rt_frame_num);
+		break;
+	default:
+		return -INVALID_ARG;
+	}
+	trace_rtg_frame_sched(frame_info->rtg->id, "g_rt_frame_num", atomic_read(&g_rt_frame_num));
+	trace_rtg_frame_sched(frame_info->rtg->id, "g_max_rt_frames", g_max_rt_frames);
+
+	return ret;
+}
+
+static long ctrl_set_enable(int abi, void __user *uarg)
+{
+	struct rtg_enable_data rs_enable;
+
+	if (copy_from_user(&rs_enable, uarg, sizeof(rs_enable))) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_ENABLE copy data failed\n");
+		return -INVALID_ARG;
+	}
+	if (rs_enable.enable == 1)
+		rtg_enable(abi, &rs_enable);
+	else
+		rtg_disable();
+
+	return SUCC;
+}
+
+static long ctrl_get_enable(int abi, void __user *uarg)
+{
+	return get_enable_type();
+}
+
+static int parse_config(const struct rtg_str_data *rs_data)
+{
+	int len;
+	char *p = NULL;
+	char *tmp = NULL;
+	char *data = NULL;
+	int value;
+
+	if (rs_data == NULL)
+		return -INVALID_ARG;
+	data = rs_data->data;
+	len = rs_data->len;
+	if ((data == NULL) || (strlen(data) != len)) //lint !e737
+		return -INVALID_ARG;
+	/*
+	 * eg: rtframe:4;
+	 */
+	for (p = strsep(&data, ";"); p != NULL; p = strsep(&data, ";")) {
+		tmp = strsep(&p, ":");
+		if ((tmp == NULL) || (p == NULL))
+			continue;
+		if (kstrtoint((const char *)p, DECIMAL, &value))
+			return -INVALID_ARG;
+		if (!strcmp(tmp, "rtframe")) {
+			if (value > 0 && value <= MULTI_FRAME_NUM) {
+				g_max_rt_frames = value;
+			} else {
+				pr_err("[SCHED_RTG] %s invalid max_rt_frame:%d, MULTI_FRAME_NUM=%d\n",
+					__func__, value, MULTI_FRAME_NUM);
+				return -INVALID_ARG;
+			}
+		}
+	}
+
+	return SUCC;
+}
+
+static long ctrl_set_config(int abi, void __user *uarg)
+{
+	struct rtg_str_data rs;
+	char temp[MAX_DATA_LEN];
+	long ret = SUCC;
+
+	if (uarg == NULL)
+		return -INVALID_ARG;
+
+	if (copy_from_user(&rs, uarg, sizeof(rs))) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_CONFIG copy data failed\n");
+		return -INVALID_ARG;
+	}
+	if ((rs.len <= 0) || (rs.len >= MAX_DATA_LEN)) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_CONFIG data len invalid\n");
+		return -INVALID_ARG;
+	}
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpointer-to-int-cast"
+	switch (abi) {
+	case IOCTL_ABI_ARM32:
+		ret = copy_from_user(&temp,
+			(void __user *)compat_ptr((compat_uptr_t)rs.data), rs.len);
+		break;
+	case IOCTL_ABI_AARCH64:
+		ret = copy_from_user(&temp, (void __user *)rs.data, rs.len);
+		break;
+	default:
+		pr_err("[SCHED_RTG] abi format error\n");
+		return -INVALID_ARG;
+	}
+	if (ret) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_CONFIG copy rs.data failed\n");
+		return -INVALID_ARG;
+	}
+#pragma GCC diagnostic pop
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
+	temp[rs.len] = '\0';
+	rs.data = &temp;
+#pragma GCC diagnostic pop
+
+	return parse_config(&rs);
+}
+
+static inline bool is_valid_type(int type)
+{
+	return (type >= VIP && type < RTG_TYPE_MAX);
+}
+
+static int parse_rtg_attr(const struct rtg_str_data *rs_data)
+{
+	char *p = NULL;
+	char *tmp = NULL;
+	char *data = NULL;
+	int value;
+	struct frame_info *frame_info = NULL;
+	int rate = -1;
+	int type = -1;
+
+	if (rs_data == NULL) {
+		pr_err("[SCHED_RTG] rtg attr: rs_data is null!\n");
+		return -INVALID_ARG;
+	}
+
+	data = rs_data->data;
+	if ((data == NULL) || (rs_data->len <= 0) ||
+	    (rs_data->len > MAX_DATA_LEN)) {
+		pr_err("[SCHED_RTG] rtg attr: rs_data len err!\n");
+		return -INVALID_ARG;
+	}
+
+	// eg: rtgId:xx;rate:xx;type:xx;
+	for (p = strsep(&data, ";"); p != NULL; p = strsep(&data, ";")) {
+		tmp = strsep(&p, ":");
+		if ((tmp == NULL) || (p == NULL))
+			continue;
+		if (kstrtoint((const char *)p, DECIMAL, &value)) {
+			pr_err("[SCHED_RTG] rtg attr: rs_data format err!\n");
+			return -INVALID_ARG;
+		}
+		if (!strcmp(tmp, "rtgId")) {
+			frame_info = rtg_frame_info(value);
+		} else if (!strcmp(tmp, "rate")) {
+			rate = value;
+		} else if (!strcmp(tmp, "type")) {
+			if (is_valid_type(value)) {
+				type = value;
+			} else {
+				pr_err("[SCHED_RTG] invalid type : %d\n", value);
+				return -INVALID_ARG;
+			}
+		} else {
+			pr_err("[SCHED_RTG] parse rtg attr failed!\n");
+			return -INVALID_ARG;
+		}
+	}
+
+	if (!frame_info) {
+		pr_err("[SCHED_RTG] rtg attr: invalid args!\n");
+		return -INVALID_ARG;
+	}
+
+	if (rate > 0)
+		set_frame_rate(frame_info, rate);
+
+	if (is_valid_type(type)) {
+		if (update_rt_frame_num(frame_info, type, UPDATE_RTG_FRAME)) {
+			pr_err("[SCHED_RTG] set rtg attr failed!\n");
+			return -INVALID_ARG;
+		}
+
+		set_frame_prio(frame_info, (type == NORMAL_TASK ?
+			NOT_RT_PRIO : (type + DEFAULT_RT_PRIO)));
+	}
+
+	return SUCC;
+}
+
+static long ctrl_set_rtg_attr(int abi, void __user *uarg)
+{
+	struct rtg_str_data rs;
+	char temp[MAX_DATA_LEN];
+	int ret;
+
+	if (uarg == NULL)
+		return -INVALID_ARG;
+
+	if (copy_from_user(&rs, uarg, sizeof(rs))) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_RTG_ATTR copy data failed\n");
+		return -INVALID_ARG;
+	}
+	if ((rs.len <= 0) || (rs.len >= MAX_DATA_LEN)) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_RTG_ATTR data len invalid\n");
+		return -INVALID_ARG;
+	}
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpointer-to-int-cast"
+	switch (abi) {
+	case IOCTL_ABI_ARM32:
+		ret = copy_from_user(&temp,
+			(void __user *)compat_ptr((compat_uptr_t)rs.data), rs.len);
+		break;
+	case IOCTL_ABI_AARCH64:
+		ret = copy_from_user(&temp, (void __user *)rs.data, rs.len);
+		break;
+	default:
+		pr_err("[SCHED_RTG] abi format error\n");
+		return -INVALID_ARG;
+	}
+#pragma GCC diagnostic pop
+
+	if (ret) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_RTG_ATTR copy rs.data failed with ret %d\n", ret);
+		return -INVALID_ARG;
+	}
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
+	temp[rs.len] = '\0';
+	rs.data = &temp;
+#pragma GCC diagnostic pop
+
+	return parse_rtg_attr(&rs);
+}
+
+static void start_frame_freq(struct frame_info *frame_info)
+{
+	if (!frame_info)
+		return;
+
+	if (atomic_read(&frame_info->start_frame_freq) == 0) {
+		atomic_set(&frame_info->start_frame_freq, 1);
+		set_frame_sched_state(frame_info, true);
+	}
+}
+
+static void set_frame(struct frame_info *frame_info, int margin)
+{
+	if (!frame_info)
+		return;
+
+	atomic_set(&frame_info->frame_state, FRAME_DRAWING);
+	if (set_frame_margin(frame_info, margin) == SUCC)
+		set_frame_timestamp(frame_info, FRAME_START);
+}
+
+static void reset_frame(struct frame_info *frame_info)
+{
+	if (!frame_info)
+		return;
+
+	if (atomic_read(&frame_info->frame_state) == FRAME_END_STATE) {
+		pr_debug("[SCHED_RTG]: Frame state is already reset\n");
+		return;
+	}
+
+	atomic_set(&frame_info->frame_state, FRAME_END_STATE);
+	set_frame_timestamp(frame_info, FRAME_END);
+}
+
+int update_frame_state(int grp_id, int margin, bool in_frame)
+{
+	struct frame_info *frame_info = NULL;
+
+	frame_info = lookup_frame_info_by_grp_id(grp_id);
+	if (!frame_info || !frame_info->rtg)
+		return -INVALID_RTG_ID;
+
+	if (in_frame) {
+		start_frame_freq(frame_info);
+		set_frame(frame_info, margin);
+		trace_rtg_frame_sched(grp_id, "margin", margin);
+	} else {
+		reset_frame(frame_info);
+	}
+
+	return SUCC;
+}
+
+static long ctrl_frame_state(void __user *uarg, bool is_enter)
+{
+	struct proc_state_data state_data;
+
+	if (uarg == NULL)
+		return -INVALID_ARG;
+
+	if (copy_from_user(&state_data, uarg, sizeof(state_data))) {
+		pr_err("[SCHED_RTG] CMD_ID_FRAME_FREQ copy data failed\n");
+		return -INVALID_ARG;
+	}
+
+	return update_frame_state(state_data.grp_id, state_data.state_param, is_enter);
+}
+
+static long ctrl_begin_frame(int abi, void __user *uarg)
+{
+	return ctrl_frame_state(uarg, true);
+}
+
+static long ctrl_end_frame(int abi, void __user *uarg)
+{
+	return ctrl_frame_state(uarg, false);
+}
+
+static int stop_frame_freq(int gid)
+{
+	struct frame_info *frame_info = NULL;
+
+	frame_info = lookup_frame_info_by_grp_id(gid);
+	if (!frame_info)
+		return -INVALID_RTG_ID;
+
+	atomic_set(&frame_info->start_frame_freq, 0);
+	set_frame_sched_state(frame_info, false);
+
+	return 0;
+}
+
+static long ctrl_end_scene(int abi, void __user *uarg)
+{
+	int rtg_id;
+
+	if (uarg == NULL)
+		return -INVALID_ARG;
+
+	if (copy_from_user(&rtg_id, uarg, sizeof(int))) {
+		pr_err("[SCHED_RTG] CMD_ID_END_SCENE copy data failed\n");
+		return -INVALID_ARG;
+	}
+
+	return stop_frame_freq(rtg_id);
+}
+
+static int set_min_util(int gid, int min_util)
+{
+	struct frame_info *frame_info = NULL;
+
+	frame_info = lookup_frame_info_by_grp_id(gid);
+	if (!frame_info)
+		return -FRAME_ERR_PID;
+
+	set_frame_min_util(frame_info, min_util, false);
+
+	return SUCC;
+}
+
+static long ctrl_set_min_util(int abi, void __user *uarg)
+{
+	struct proc_state_data state_data;
+
+	if (uarg == NULL)
+		return -INVALID_ARG;
+
+	if (copy_from_user(&state_data, uarg, sizeof(state_data))) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_MIN_UTIL copy data failed\n");
+		return -INVALID_ARG;
+	}
+
+	return set_min_util(state_data.grp_id, state_data.state_param);
+}
+
+static int set_margin(int grp_id, int margin)
+{
+	struct frame_info *frame_info = NULL;
+
+	frame_info = lookup_frame_info_by_grp_id(grp_id);
+	if (!frame_info)
+		return -FRAME_ERR_PID;
+
+	set_frame_margin(frame_info, margin);
+
+	return SUCC;
+}
+
+static long ctrl_set_margin(int abi, void __user *uarg)
+{
+	struct proc_state_data state_data;
+
+	if (uarg == NULL)
+		return -INVALID_ARG;
+
+	if (copy_from_user(&state_data, uarg, sizeof(state_data))) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_MARGIN copy data failed\n");
+		return -INVALID_ARG;
+	}
+
+	return set_margin(state_data.grp_id, state_data.state_param);
+}
+
+static void clear_rtg_frame_thread(struct frame_info *frame_info, bool reset)
+{
+	struct frame_thread_info frame_thread_info;
+	int i;
+
+	if (!reset && frame_info)
+		frame_thread_info.prio = frame_info->prio;
+	else
+		frame_thread_info.prio = NOT_RT_PRIO;
+	for (i = 0; i < MAX_TID_NUM; i++)
+		frame_thread_info.thread[i] = -1;
+	frame_thread_info.thread_num = MAX_TID_NUM;
+	update_frame_thread_info(frame_info, &frame_thread_info);
+	if (reset) {
+		atomic_set(&frame_info->max_rt_thread_num, DEFAULT_MAX_RT_THREAD);
+		atomic_set(&frame_info->frame_sched_state, 0);
+		trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 0);
+	}
+}
+
+static void copy_proc_from_rsdata(struct rtg_proc_data *proc_info,
+	const struct rtg_grp_data *rs_data)
+{
+	memset(proc_info, 0, sizeof(struct rtg_proc_data));
+	proc_info->type = VIP;
+	proc_info->rtcnt = DEFAULT_MAX_RT_THREAD;
+	if ((rs_data->grp_type > 0) && (rs_data->grp_type < RTG_TYPE_MAX))
+		proc_info->type = rs_data->grp_type;
+	if ((rs_data->rt_cnt > 0) && (rs_data->rt_cnt < DEFAULT_MAX_RT_THREAD))
+		proc_info->rtcnt = rs_data->rt_cnt;
+}
+
+static void init_frame_thread_info(struct frame_thread_info *frame_thread_info,
+	const struct rtg_proc_data *proc_info)
+{
+	int i;
+	int type = proc_info->type;
+
+	frame_thread_info->prio = (type == NORMAL_TASK ?
+		NOT_RT_PRIO : (type + DEFAULT_RT_PRIO));
+	for (i = 0; i < MAX_TID_NUM; i++)
+		frame_thread_info->thread[i] = proc_info->thread[i];
+	frame_thread_info->thread_num = MAX_TID_NUM;
+}
+
+static int parse_create_rtg_grp(const struct rtg_grp_data *rs_data)
+{
+	struct rtg_proc_data proc_info;
+	struct frame_info *frame_info;
+	struct frame_thread_info frame_thread_info;
+
+	copy_proc_from_rsdata(&proc_info, rs_data);
+	proc_info.rtgid = alloc_multi_frame_info();
+	frame_info = rtg_frame_info(proc_info.rtgid);
+	if (!frame_info) {
+		pr_err("[SCHED_RTG] no free multi frame.\n");
+		return -NO_FREE_MULTI_FRAME;
+	}
+	atomic_set(&frame_info->max_rt_thread_num, proc_info.rtcnt);
+	if (update_rt_frame_num(frame_info, rs_data->grp_type, ADD_RTG_FRAME)) {
+		release_multi_frame_info(proc_info.rtgid);
+		return -NO_RT_FRAME;
+	}
+	init_frame_thread_info(&frame_thread_info, &proc_info);
+	update_frame_thread_info(frame_info, &frame_thread_info);
+	atomic_set(&frame_info->frame_sched_state, 1);
+	pr_info("[SCHED_RTG] %s rtgid=%d, type=%d, prio=%d, threadnum=%d\n",
+		__func__, proc_info.rtgid, rs_data->grp_type,
+		frame_thread_info.prio, frame_thread_info.thread_num);
+
+	return proc_info.rtgid;
+}
+
+static int parse_add_rtg_thread(const struct rtg_grp_data *rs_data)
+{
+	struct rtg_proc_data proc_info;
+	struct frame_info *frame_info;
+	int add_index;
+	int add_num;
+	int prio;
+	int fail_num = 0;
+	int i;
+
+	if ((rs_data->grp_id <= 0) || (rs_data->grp_id >= MAX_NUM_CGROUP_COLOC_ID))
+		return -INVALID_ARG;
+	copy_proc_from_rsdata(&proc_info, rs_data);
+	frame_info = lookup_frame_info_by_grp_id(rs_data->grp_id);
+	if (!frame_info) {
+		pr_err("[SCHED_RTG] grp not created yet.\n");
+		return -INVALID_ARG;
+	}
+	write_lock(&frame_info->lock);
+	add_num = rs_data->tid_num;
+	if ((frame_info->thread_num < 0) || (add_num < 0)) {
+		pr_err("[SCHED_RTG] Unexpected err: frame_info num < 0.\n");
+		write_unlock(&frame_info->lock);
+		return -INVALID_RTG_ID;
+	}
+	if (frame_info->thread_num + add_num > MAX_TID_NUM) {
+		pr_err("[SCHED_RTG] frame info thread up to max already.\n");
+		write_unlock(&frame_info->lock);
+		return -INVALID_RTG_ID;
+	}
+	add_index = frame_info->thread_num;
+	prio = frame_info->prio;
+	for (i = 0; i < add_num; i++) {
+		frame_info->thread[add_index] = update_frame_thread(frame_info, prio, prio,
+			rs_data->tids[i],
+			frame_info->thread[add_index]);
+		if (frame_info->thread[add_index]) {
+			frame_info->thread_num++;
+			add_index = frame_info->thread_num;
+		} else {
+			fail_num++;
+		}
+	}
+	write_unlock(&frame_info->lock);
+
+	return fail_num;
+}
+
+static int parse_remove_thread(const struct rtg_grp_data *rs_data)
+{
+	pr_err("[SCHED_RTG] frame rtg not support remove single yet.\n");
+
+	return -INVALID_ARG;
+}
+
+static int do_clear_or_destroy_grp(const struct rtg_grp_data *rs_data, bool destroy)
+{
+	struct frame_info *frame_info;
+	int type;
+	int id = rs_data->grp_id;
+
+	if (!is_frame_rtg(id)) {
+		pr_err("[SCHED_RTG] Failed to destroy rtg group %d!\n", id);
+		return -INVALID_ARG;
+	}
+
+	frame_info = rtg_frame_info(id);
+	if (!frame_info) {
+		pr_err("[SCHED_RTG] Failed to destroy rtg group %d: grp not exist.\n", id);
+		return -INVALID_ARG;
+	}
+
+	type = frame_info->prio - DEFAULT_RT_PRIO;
+	if (destroy) {
+		clear_rtg_frame_thread(frame_info, true);
+		release_multi_frame_info(id);
+		update_rt_frame_num(frame_info, type, CLEAR_RTG_FRAME);
+	} else {
+		clear_rtg_frame_thread(frame_info, false);
+	}
+	pr_info("[SCHED_RTG] %s clear frame(id=%d)\n", __func__, id);
+
+	return SUCC;
+}
+
+static int parse_clear_grp(const struct rtg_grp_data *rs_data)
+{
+	return do_clear_or_destroy_grp(rs_data, false);
+}
+
+static int parse_destroy_grp(const struct rtg_grp_data *rs_data)
+{
+	return do_clear_or_destroy_grp(rs_data, true);
+}
+
+long ctrl_set_rtg(int abi, void __user *uarg)
+{
+	struct rtg_grp_data rs_data;
+	long ret;
+
+	if (copy_from_user(&rs_data, uarg, sizeof(rs_data))) {
+		pr_err("[SCHED_RTG] CMD_ID_SET_RTG copy data failed\n");
+		return -INVALID_ARG;
+	}
+
+	switch (rs_data.rtg_cmd) {
+	case CMD_CREATE_RTG_GRP:
+		ret = parse_create_rtg_grp(&rs_data);
+		break;
+	case CMD_ADD_RTG_THREAD:
+		ret = parse_add_rtg_thread(&rs_data);
+		break;
+	case CMD_REMOVE_RTG_THREAD:
+		ret = parse_remove_thread(&rs_data);
+		break;
+	case CMD_CLEAR_RTG_GRP:
+		ret = parse_clear_grp(&rs_data);
+		break;
+	case CMD_DESTROY_RTG_GRP:
+		ret = parse_destroy_grp(&rs_data);
+		break;
+	default:
+		return -INVALID_ARG;
+	}
+
+	return ret;
+}
+
+static long ctrl_list_rtg(int abi, void __user *uarg)
+{
+	struct rtg_info rs_data;
+	long ret;
+
+	if (copy_from_user(&rs_data, uarg, sizeof(rs_data))) {
+		pr_err("[SCHED_RTG] CMD_ID_LIST_RTG copy data failed\n");
+		return -INVALID_ARG;
+	}
+	ret = list_rtg_group(&rs_data);
+	if (copy_to_user(uarg, &rs_data, sizeof(rs_data))) {
+		pr_err("[SCHED_RTG] CMD_ID_LIST_RTG send data failed\n");
+		return -INVALID_ARG;
+	}
+
+	return ret;
+}
+
+static int list_rtg_thread(struct rtg_grp_data *rs_data)
+{
+	int num = 0;
+	int grp_id = rs_data->grp_id;
+	struct frame_info *frame_info = NULL;
+	int i;
+
+	frame_info = lookup_frame_info_by_grp_id(grp_id);
+	if (!frame_info) {
+		pr_err("[SCHED_RTG] Look up for grp %d failed!\n", grp_id);
+		return -INVALID_ARG;
+	}
+	for (i = 0; i < frame_info->thread_num; i++) {
+		if (frame_info->thread[i]) {
+			rs_data->tids[num] = frame_info->thread[i]->pid;
+			num++;
+		}
+	}
+	rs_data->tid_num = num;
+
+	return num;
+}
+
+static long ctrl_list_rtg_thread(int abi, void __user *uarg)
+{
+	struct rtg_grp_data rs_data;
+	long ret;
+
+	if (copy_from_user(&rs_data, uarg, sizeof(rs_data))) {
+		pr_err("[SCHED_RTG] CMD_ID_LIST_RTG_THREAD copy data failed\n");
+		return -INVALID_ARG;
+	}
+	ret = list_rtg_thread(&rs_data);
+	if (copy_to_user(uarg, &rs_data, sizeof(rs_data))) {
+		pr_err("[SCHED_RTG] CMD_ID_LIST_RTG_THREAD send data failed\n");
+		return -INVALID_ARG;
+	}
+
+	return ret;
+}
+
+static long ctrl_search_rtg(int abi, void __user *uarg)
+{
+	struct proc_state_data search_data;
+
+	if (copy_from_user(&search_data, uarg, sizeof(search_data))) {
+		pr_err("[SCHED_RTG] CMD_ID_SEARCH_RTG copy data failed\n");
+		return -INVALID_ARG;
+	}
+
+	return search_rtg(search_data.state_param);
+}
+
+static long do_proc_rtg_ioctl(int abi, struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *uarg = (void __user *)(uintptr_t)arg;
+	unsigned int func_id = _IOC_NR(cmd);
+
+	if (uarg == NULL) {
+		pr_err("[SCHED_RTG] %s: invalid user uarg\n", __func__);
+		return -EINVAL;
+	}
+
+	if (_IOC_TYPE(cmd) != RTG_SCHED_IPC_MAGIC) {
+		pr_err("[SCHED_RTG] %s: RTG_SCHED_IPC_MAGIC fail, TYPE=%d\n",
+			__func__, _IOC_TYPE(cmd));
+		return -INVALID_MAGIC;
+	}
+
+	if ((func_id != SET_ENABLE) && !atomic_read(&g_rtg_enable)) {
+		pr_err("[SCHED_RTG] CMD_ID %x error: Rtg not enabled yet.\n", cmd);
+		return -RTG_DISABLED;
+	}
+
+	if (func_id >= RTG_CTRL_MAX_NR) {
+		pr_err("[SCHED_RTG] %s: RTG_MAX_NR fail, _IOC_NR(cmd)=%d, MAX_NR=%d\n",
+			__func__, _IOC_NR(cmd), RTG_CTRL_MAX_NR);
+		return -INVALID_CMD;
+	}
+
+	if (g_func_array[func_id] != NULL)
+		return (*g_func_array[func_id])(abi, uarg);
+
+	return -EINVAL;
+}
+
+static void reset_frame_info(struct frame_info *frame_info)
+{
+	clear_rtg_frame_thread(frame_info, true);
+	atomic_set(&frame_info->frame_state, -1);
+	atomic_set(&frame_info->curr_rt_thread_num, 0);
+	atomic_set(&frame_info->max_rt_thread_num, DEFAULT_MAX_RT_THREAD);
+}
+
+static int do_init_proc_state(int rtgid, const int *config, int len)
+{
+	struct related_thread_group *grp = NULL;
+	struct frame_info *frame_info = NULL;
+
+	grp = lookup_related_thread_group(rtgid);
+	if (unlikely(!grp))
+		return -EINVAL;
+
+	frame_info = (struct frame_info *)grp->private_data;
+	if (!frame_info)
+		return -EINVAL;
+
+	reset_frame_info(frame_info);
+
+	if ((config[RTG_FREQ_CYCLE] >= MIN_FREQ_CYCLE) &&
+	    (config[RTG_FREQ_CYCLE] <= MAX_FREQ_CYCLE))
+		sched_set_group_freq_update_interval(rtgid,
+			(unsigned int)config[RTG_FREQ_CYCLE]);
+	else
+		sched_set_group_freq_update_interval(rtgid,
+			DEFAULT_FREQ_CYCLE);
+
+	if (config[RTG_INVALID_INTERVAL] != INVALID_VALUE)
+		sched_set_group_util_invalid_interval(rtgid,
+			config[RTG_INVALID_INTERVAL]);
+	else
+		sched_set_group_util_invalid_interval(rtgid,
+			DEFAULT_INVALID_INTERVAL);
+
+	set_frame_max_util(frame_info, g_frame_max_util);
+
+	return SUCC;
+}
+
+static int init_proc_state(const int *config, int len)
+{
+	int ret;
+	int id;
+
+	if ((config == NULL) || (len != RTG_CONFIG_NUM))
+		return -INVALID_ARG;
+
+	if ((config[RTG_FRAME_MAX_UTIL] > 0) &&
+	    (config[RTG_FRAME_MAX_UTIL] < DEFAULT_MAX_UTIL))
+		g_frame_max_util = config[RTG_FRAME_MAX_UTIL];
+
+	for (id = MULTI_FRAME_ID; id < (MULTI_FRAME_ID + MULTI_FRAME_NUM); id++) {
+		ret = do_init_proc_state(id, config, len);
+		if (ret) {
+			pr_err("[SCHED_RTG] init proc state for FRAME_ID=%d failed, ret=%d\n",
+				id, ret);
+			return ret;
+		}
+	}
+	atomic_set(&g_rt_frame_num, 0);
+
+	return SUCC;
+}
+
+static void deinit_proc_state(void)
+{
+	int id;
+	struct frame_info *frame_info = NULL;
+	struct related_thread_group *grp = NULL;
+
+	for (id = MULTI_FRAME_ID; id < (MULTI_FRAME_ID + MULTI_FRAME_NUM); id++) {
+		grp = lookup_related_thread_group(id);
+		if (unlikely(!grp))
+			return;
+
+		frame_info = (struct frame_info *)grp->private_data;
+		if (frame_info)
+			reset_frame_info(frame_info);
+	}
+	clear_multi_frame_info();
+	atomic_set(&g_rt_frame_num, 0);
+}
+
+static int proc_rtg_open(struct inode *inode, struct file *filp)
+{
+	return SUCC;
+}
+
+static int proc_rtg_release(struct inode *inode, struct file *filp)
+{
+	return SUCC;
+}
+
+static long proc_rtg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	return do_proc_rtg_ioctl(IOCTL_ABI_AARCH64, file, cmd, arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long proc_rtg_compat_ioctl(struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	return do_proc_rtg_ioctl(IOCTL_ABI_ARM32, file, cmd,
+		(unsigned long)(compat_ptr((compat_uptr_t)arg)));
+}
+#endif
+
+static const struct file_operations rtg_ctrl_fops = {
+	.open = proc_rtg_open,
+	.release = proc_rtg_release,
+	.unlocked_ioctl = proc_rtg_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = proc_rtg_compat_ioctl,
+#endif
+};
+
+static struct miscdevice rtg_ctrl_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sched_rtg_ctrl",
+	.fops = &rtg_ctrl_fops,
+	.mode = 0666,
+};
+
+static int __init rtg_ctrl_dev_init(void)
+{
+	return misc_register(&rtg_ctrl_device);
+}
+
+static void __exit rtg_ctrl_dev_exit(void)
+{
+	misc_deregister(&rtg_ctrl_device);
+}
+
+module_init(rtg_ctrl_dev_init);
+module_exit(rtg_ctrl_dev_exit);
diff --git a/kernel/sched/rtg/rtg_ctrl.h b/kernel/sched/rtg/rtg_ctrl.h
new file mode 100644
index 0000000000000000000000000000000000000000..df8ac420d156c4d60456365fd0e3b4d9a91c3dd8
--- /dev/null
+++ b/kernel/sched/rtg/rtg_ctrl.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * rtg control interface
+ *
+ * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd.
+ */
+
+#ifndef __RTG_CTL_H
+#define __RTG_CTL_H
+
+#include
+#include
+#include
+
+#include "frame_rtg.h"
+
+/* set rtg */
+#define INVALID_VALUE 0xffff
+#define DEFAULT_RT_PRIO 97
+
+#define MAX_DATA_LEN 256
+#define DECIMAL 10
+#define DEFAULT_MAX_UTIL 1024
+#define MAX_SUBPROCESS_NUM 8
+
+#define RTG_ID_INVALID (-1)
+#define DEFAULT_MAX_RT_FRAME 3
+#define MAX_RT_THREAD (MAX_TID_NUM + 2)
+#define INIT_VALUE (-1)
+#define UPDATE_RTG_FRAME (1 << 0)
+#define ADD_RTG_FRAME (1 << 1)
+#define CLEAR_RTG_FRAME (1 << 2)
+
+#define DEFAULT_FREQ_CYCLE 4
+#define MIN_FREQ_CYCLE 1
+#define MAX_FREQ_CYCLE 16
+#define DEFAULT_INVALID_INTERVAL 50
+
+/* proc_state */
+enum proc_state {
+	STATE_MIN = 0,
+	FRAME_DRAWING,
+	FRAME_RME_MAX = 19,
+	/* rme end */
+	FRAME_END_STATE = FRAME_RME_MAX + 1,
+
+	FRAME_CLICK = 100,
+	STATE_MAX,
+};
+
+enum rtg_config {
+	RTG_FREQ_CYCLE,
+	RTG_FRAME_MAX_UTIL,
+	RTG_INVALID_INTERVAL,
+	RTG_CONFIG_NUM,
+};
+
+enum rtg_err_no {
+	SUCC = 0,
+	RTG_DISABLED = 1,
+	INVALID_ARG,
+	INVALID_MAGIC,
+	INVALID_CMD,
+	FRAME_ERR_PID = 100,
+	NO_FREE_MULTI_FRAME,
+	NOT_MULTI_FRAME,
+	INVALID_RTG_ID,
+	NO_RT_FRAME,
+};
+enum enable_type {
+	ALL_ENABLE = 1,
+	ENABLE_MAX
+};
+
+struct rtg_grp_data {
+	int rtg_cmd;
+	int grp_id;
+	int grp_type;
+	int rt_cnt;
+	int tid_num;
+	int tids[MAX_TID_NUM];
+};
+
+struct rtg_proc_data {
+	int rtgid;
+	int type;
+	int thread[MAX_TID_NUM];
+	int rtcnt;
+};
+#endif
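
The patch itself contains no caller, so the following is a purely illustrative user-space sketch (not part of the patch) of the group lifecycle behind CMD_ID_SET_RTG. Everything outside the patch is an assumption: the device path /dev/sched_rtg_ctrl is inferred from the miscdevice name, struct rtg_grp_data lives in the kernel-internal rtg_ctrl.h and is therefore mirrored by hand, the availability of <linux/sched/rtg_ctrl.h> to user space is assumed, and the group type and thread ids are placeholders. The layout matters because ctrl_set_rtg() copies a struct rtg_grp_data from the user pointer, and parse_create_rtg_grp() reports the new group id through the ioctl return value.

/* Hypothetical sketch; all names outside the patch are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/sched/rtg_ctrl.h>	/* CMD_ID_SET_RTG, CMD_CREATE_RTG_GRP, ... (assumed reachable) */

#define RTG_MAX_TID_NUM 5		/* mirrors MAX_TID_NUM from the kernel-internal header */

struct rtg_grp_req {			/* user-space mirror of struct rtg_grp_data */
	int rtg_cmd;
	int grp_id;
	int grp_type;
	int rt_cnt;
	int tid_num;
	int tids[RTG_MAX_TID_NUM];
};

int main(void)
{
	struct rtg_grp_req req = { 0 };
	int fd, grp_id;

	fd = open("/dev/sched_rtg_ctrl", O_RDWR);	/* device path assumed from .name */
	if (fd < 0)
		return 1;

	/* Create a frame group; parse_create_rtg_grp() returns the new group id. */
	req.rtg_cmd = CMD_CREATE_RTG_GRP;
	req.grp_type = 1;	/* placeholder for one of the accepted frame RTG types */
	req.rt_cnt = 2;		/* at most this many threads of the group run as RT */
	grp_id = ioctl(fd, CMD_ID_SET_RTG, &req);
	if (grp_id <= 0) {
		fprintf(stderr, "create failed: %d\n", grp_id);
		close(fd);
		return 1;
	}

	/* Attach threads; parse_add_rtg_thread() returns the number of tids that failed. */
	req.rtg_cmd = CMD_ADD_RTG_THREAD;
	req.grp_id = grp_id;
	req.tid_num = 2;
	req.tids[0] = 1234;	/* placeholder thread ids */
	req.tids[1] = 1235;
	ioctl(fd, CMD_ID_SET_RTG, &req);

	/* Tear the group down again. */
	req.rtg_cmd = CMD_DESTROY_RTG_GRP;
	req.grp_id = grp_id;
	ioctl(fd, CMD_ID_SET_RTG, &req);

	close(fd);
	return 0;
}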
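A second sketch, equally hypothetical, shows the per-frame control path the handlers above implement: CMD_ID_SET_CONFIG carries a "rtframe:<n>;" string into parse_config(), CMD_ID_SET_RTG_ATTR carries "rtgId:<id>;rate:<fps>;type:<t>;" into parse_rtg_attr(), and CMD_ID_BEGIN_FRAME_FREQ / CMD_ID_END_FRAME_FREQ bracket each frame with a struct proc_state_data whose state_param is the margin. It assumes the interface was already enabled through CMD_ID_SET_ENABLE (the payload accepted by set_enable_config() is not shown in this hunk), that the group id came from an earlier CMD_CREATE_RTG_GRP, and that the CMD_ID_* macros and structures are visible to user space; the type value is a placeholder.

/* Hypothetical sketch; device path, group id and type value are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/sched/rtg_ctrl.h>

static int rtg_send_str(int fd, unsigned int cmd, char *str)
{
	struct rtg_str_data sd = {
		.type = 0,			/* not used by parse_config()/parse_rtg_attr() */
		.len = (int)strlen(str),	/* kernel copies .len bytes, then NUL-terminates */
		.data = str,
	};

	return ioctl(fd, cmd, &sd);
}

int main(void)
{
	struct proc_state_data frame = { .grp_id = 10, .state_param = 0 };
	char attr[64];
	int fd;

	fd = open("/dev/sched_rtg_ctrl", O_RDWR);	/* path assumed from the miscdevice name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Allow at most two RT frame groups (parse_config(): "rtframe:<n>;"). */
	if (rtg_send_str(fd, CMD_ID_SET_CONFIG, "rtframe:2;") < 0)
		perror("CMD_ID_SET_CONFIG");

	/* grp_id 10 assumed to come from an earlier CMD_CREATE_RTG_GRP request. */
	snprintf(attr, sizeof(attr), "rtgId:%d;rate:60;type:1;", frame.grp_id);
	if (rtg_send_str(fd, CMD_ID_SET_RTG_ATTR, attr) < 0)	/* type value is a placeholder */
		perror("CMD_ID_SET_RTG_ATTR");

	/* Per-frame pacing: state_param is the margin handed to set_frame_margin(). */
	ioctl(fd, CMD_ID_BEGIN_FRAME_FREQ, &frame);
	/* ... render the frame ... */
	ioctl(fd, CMD_ID_END_FRAME_FREQ, &frame);

	close(fd);
	return 0;
}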