diff --git a/include/linux/bpf_sched.h b/include/linux/bpf_sched.h new file mode 100644 index 0000000000000000000000000000000000000000..9cd2493d2787ce9ea689d9c5d50d539453eb76f0 --- /dev/null +++ b/include/linux/bpf_sched.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BPF_SCHED_H +#define _LINUX_BPF_SCHED_H + +#include <linux/bpf.h> + +#ifdef CONFIG_BPF_SCHED + +#include <linux/jump_label.h> + +#define BPF_SCHED_HOOK(RET, DEFAULT, NAME, ...) \ + RET bpf_sched_##NAME(__VA_ARGS__); +#include <linux/sched_hook_defs.h> +#undef BPF_SCHED_HOOK + +int bpf_sched_verify_prog(struct bpf_verifier_log *vlog, + const struct bpf_prog *prog); + +DECLARE_STATIC_KEY_FALSE(bpf_sched_enabled_key); + +static inline bool bpf_sched_enabled(void) +{ + return static_branch_unlikely(&bpf_sched_enabled_key); +} + +static inline void bpf_sched_inc(void) +{ + static_branch_inc(&bpf_sched_enabled_key); +} + +static inline void bpf_sched_dec(void) +{ + static_branch_dec(&bpf_sched_enabled_key); +} + +#else /* !CONFIG_BPF_SCHED */ + +static inline int bpf_sched_verify_prog(struct bpf_verifier_log *vlog, + const struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} + +static inline bool bpf_sched_enabled(void) +{ + return false; +} + +#endif /* CONFIG_BPF_SCHED */ +#endif /* _LINUX_BPF_SCHED_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index a8137bb6dd3c223e42426c3b6d6442af0d63c762..5732b485c53991f6c92322cd0a788e83d08b2349 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -77,6 +77,10 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm, void *, void *) #endif /* CONFIG_BPF_LSM */ #endif +#ifdef CONFIG_BPF_SCHED +BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED, bpf_sched, + void *, void *) +#endif /* CONFIG_BPF_SCHED */ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) diff --git a/include/linux/sched_hook_defs.h b/include/linux/sched_hook_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..e2f65e4b8895daba3cbafc76efc413be410da587 --- /dev/null +++ b/include/linux/sched_hook_defs.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +BPF_SCHED_HOOK(int, 0, cfs_check_preempt_tick, struct sched_entity *curr, unsigned long delta_exec) +BPF_SCHED_HOOK(int, 0, cfs_check_preempt_wakeup, struct task_struct *curr, struct task_struct *p) +BPF_SCHED_HOOK(int, 0, cfs_wakeup_preempt_entity, struct sched_entity *curr, + struct sched_entity *se) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 8fae845d80e260f59a1729c8b1d4364a0fdc80fc..499bb05b01fd8482a6e45d57bc3bb71beb5d24aa 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -199,6 +199,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_EXT, BPF_PROG_TYPE_LSM, BPF_PROG_TYPE_SK_LOOKUP, + BPF_PROG_TYPE_SCHED, }; enum bpf_attach_type { @@ -240,6 +241,7 @@ enum bpf_attach_type { BPF_XDP_CPUMAP, BPF_SK_LOOKUP, BPF_XDP, + BPF_SCHED, __MAX_BPF_ATTACH_TYPE }; @@ -3755,6 +3757,26 @@ union bpf_attr { * Get Ipv4 origdst or replysrc. Works with IPv4. * Return * 0 on success, or a negative error in case of failure. + * + * u64 bpf_sched_entity_to_tgidpid(struct sched_entity *se) + * Description + * Return task's encoded tgid and pid if the sched entity is a task. + * Return + * Tgid and pid encoded as tgid << 32 \| pid, if *se* is a task. (u64)-1 otherwise. + * + * u64 bpf_sched_entity_to_cgrpid(struct sched_entity *se) + * Description + * Return cgroup id if the given sched entity is a cgroup. + * Return + * Cgroup id, if *se* is a cgroup. (u64)-1 otherwise.
+ * + * long bpf_sched_entity_belongs_to_cgrp(struct sched_entity *se, u64 cgrpid) + * Description + * Checks whether the sched entity belongs to a cgroup or + * its sub-tree. It doesn't require a cgroup CPU controller + * to be enabled. + * Return + * 1 if the sched entity belongs to the cgroup or its sub-tree, 0 otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3915,6 +3937,9 @@ union bpf_attr { FN(redirect_peer), \ FN(get_sockops_uid_gid), \ FN(sk_original_addr), \ + FN(sched_entity_to_tgidpid), \ + FN(sched_entity_to_cgrpid), \ + FN(sched_entity_belongs_to_cgrp), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/init/Kconfig b/init/Kconfig index 27c5ed16fef173303a64de3c228e002df858defa..c3fa3240d87d7d374d68bffda1d3958f3749e9f1 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1749,6 +1749,16 @@ config BPF_LSM If you are unsure how to answer this question, answer N. +config BPF_SCHED + bool "SCHED Instrumentation with BPF" + depends on BPF_EVENTS + depends on BPF_SYSCALL + help + Enables instrumentation of the sched hooks with eBPF programs for + implementing dynamic scheduling policies. + + If you are unsure how to answer this question, answer N. + config BPF_SYSCALL bool "Enable bpf() system call" select BPF diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index fba28f17e61aa7b353cb8d42b3fef9eae740c0d3..9a0a9895ec62ad071a319eeb0e72aa16433e4dd8 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4479,6 +4479,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, return true; t = btf_type_by_id(btf, t->type); break; + case BPF_SCHED: case BPF_MODIFY_RETURN: /* For now the BPF_MODIFY_RETURN can only be attached to * functions that return an int. diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 419dbc3d060ee1fea6835a59291bf36b9c570d7e..2f4091da923fb1e00038f201bb18e192850d0b17 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -31,6 +31,7 @@ #include #include #include +#include <linux/bpf_sched.h> #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ @@ -1997,6 +1998,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, case BPF_PROG_TYPE_LSM: case BPF_PROG_TYPE_STRUCT_OPS: case BPF_PROG_TYPE_EXT: + case BPF_PROG_TYPE_SCHED: break; default: return -EINVAL; @@ -2108,6 +2110,7 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) case BPF_PROG_TYPE_LSM: case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ case BPF_PROG_TYPE_EXT: /* extends any prog */ + case BPF_PROG_TYPE_SCHED: return true; default: return false; @@ -2529,6 +2532,11 @@ static void bpf_tracing_link_release(struct bpf_link *link) struct bpf_tracing_link *tr_link = container_of(link, struct bpf_tracing_link, link); +#ifdef CONFIG_BPF_SCHED + if (link->prog->type == BPF_PROG_TYPE_SCHED) + bpf_sched_dec(); +#endif + WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog, tr_link->trampoline)); @@ -2608,6 +2616,12 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, goto out_put_prog; } break; + case BPF_PROG_TYPE_SCHED: + if (prog->expected_attach_type != BPF_SCHED) { + err = -EINVAL; + goto out_put_prog; + } + break; default: err = -EINVAL; goto out_put_prog; @@ -2710,6 +2724,11 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, goto out_unlock; } +#ifdef CONFIG_BPF_SCHED + if (prog->type == BPF_PROG_TYPE_SCHED) + bpf_sched_inc(); +#endif + link->tgt_prog = tgt_prog; link->trampoline = tr; @@ -2838,6 +2857,7 @@ static int 
bpf_raw_tracepoint_open(const union bpf_attr *attr) case BPF_PROG_TYPE_TRACING: case BPF_PROG_TYPE_EXT: case BPF_PROG_TYPE_LSM: + case BPF_PROG_TYPE_SCHED: if (attr->raw_tracepoint.name) { /* The attach point for this category of programs * should be specified via btf_id during program load. diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 986dabc3d11f0688433bd27d02d67991d9574811..cc6ba35a1d14dc584d6114b5d309a8672c3971b4 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -357,6 +357,7 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog) switch (prog->expected_attach_type) { case BPF_TRACE_FENTRY: return BPF_TRAMP_FENTRY; + case BPF_SCHED: case BPF_MODIFY_RETURN: return BPF_TRAMP_MODIFY_RETURN; case BPF_TRACE_FEXIT: diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6423f1714a2f456b17c017217e63218611f653cb..d26104b258baba4020f1587504c81ec4b403894e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -22,6 +22,7 @@ #include #include #include +#include <linux/bpf_sched.h> #include "disasm.h" @@ -12178,6 +12179,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, case BPF_LSM_MAC: case BPF_TRACE_FENTRY: case BPF_TRACE_FEXIT: + case BPF_SCHED: if (!btf_type_is_func(t)) { bpf_log(log, "attach_btf_id %u is not a function\n", btf_id); @@ -12283,7 +12285,8 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) if (prog->type != BPF_PROG_TYPE_TRACING && prog->type != BPF_PROG_TYPE_LSM && - prog->type != BPF_PROG_TYPE_EXT) + prog->type != BPF_PROG_TYPE_EXT && + prog->type != BPF_PROG_TYPE_SCHED) return 0; ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); @@ -12323,6 +12326,12 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) return ret; } + if (prog->type == BPF_PROG_TYPE_SCHED) { + ret = bpf_sched_verify_prog(&env->log, prog); + if (ret < 0) + return ret; + } + key = bpf_trampoline_compute_key(tgt_prog, btf_id); tr = bpf_trampoline_get(key, &tgt_info); if (!tr) diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 5fc9c9b70862f72486b4b6e57711d530ceaeca18..8ae9e39eb83ab9cf9bd30f5005932a3ce924e370 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o obj-$(CONFIG_MEMBARRIER) += membarrier.o obj-$(CONFIG_CPU_ISOLATION) += isolation.o obj-$(CONFIG_PSI) += psi.o +obj-$(CONFIG_BPF_SCHED) += bpf_sched.o \ No newline at end of file diff --git a/kernel/sched/bpf_sched.c b/kernel/sched/bpf_sched.c new file mode 100644 index 0000000000000000000000000000000000000000..2ce2afcacb179a5844da670264e8b77911034af7 --- /dev/null +++ b/kernel/sched/bpf_sched.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/cgroup.h> +#include <linux/bpf_verifier.h> +#include <linux/bpf_sched.h> +#include <linux/btf_ids.h> +#include <linux/filter.h> +#include "sched.h" + +DEFINE_STATIC_KEY_FALSE(bpf_sched_enabled_key); + +/* * For every hook declare a nop function where a BPF program can be attached. */ +#define BPF_SCHED_HOOK(RET, DEFAULT, NAME, ...) \ +noinline RET bpf_sched_##NAME(__VA_ARGS__) \ +{ \ + return DEFAULT; \ +} + +#include <linux/sched_hook_defs.h> +#undef BPF_SCHED_HOOK + +#define BPF_SCHED_HOOK(RET, DEFAULT, NAME, ...) \
BTF_ID(func, bpf_sched_##NAME) +BTF_SET_START(bpf_sched_hooks) +#include <linux/sched_hook_defs.h> +#undef BPF_SCHED_HOOK +BTF_SET_END(bpf_sched_hooks) + +int bpf_sched_verify_prog(struct bpf_verifier_log *vlog, + const struct bpf_prog *prog) +{ + if (!prog->gpl_compatible) { + bpf_log(vlog, + "sched programs must have a GPL compatible license\n"); + return -EINVAL; + } + + if (!btf_id_set_contains(&bpf_sched_hooks, prog->aux->attach_btf_id)) { + bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n", + prog->aux->attach_btf_id, prog->aux->attach_func_name); + return -EINVAL; + } + + return 0; +} + +BPF_CALL_1(bpf_sched_entity_to_tgidpid, struct sched_entity *, se) +{ + if (entity_is_task(se)) { + struct task_struct *task = task_of(se); + + return (u64) task->tgid << 32 | task->pid; + } else { + return (u64) -1; + } +} + +BPF_CALL_1(bpf_sched_entity_to_cgrpid, struct sched_entity *, se) +{ +#ifdef CONFIG_FAIR_GROUP_SCHED + if (!entity_is_task(se)) + return cgroup_id(se->my_q->tg->css.cgroup); +#endif + return (u64) -1; +} + +BPF_CALL_2(bpf_sched_entity_belongs_to_cgrp, struct sched_entity *, se, + u64, cgrpid) +{ +#ifdef CONFIG_CGROUPS + struct cgroup *cgrp; + int level; + + if (entity_is_task(se)) + cgrp = task_dfl_cgroup(task_of(se)); +#ifdef CONFIG_FAIR_GROUP_SCHED + else + cgrp = se->my_q->tg->css.cgroup; +#endif + + for (level = cgrp->level; level; level--) + if (cgrp->ancestor_ids[level] == cgrpid) + return 1; +#endif + return 0; +} + +BTF_ID_LIST_SINGLE(btf_sched_entity_ids, struct, sched_entity) + +static const struct bpf_func_proto bpf_sched_entity_to_tgidpid_proto = { + .func = bpf_sched_entity_to_tgidpid, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &btf_sched_entity_ids[0], +}; + +static const struct bpf_func_proto bpf_sched_entity_to_cgrpid_proto = { + .func = bpf_sched_entity_to_cgrpid, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &btf_sched_entity_ids[0], +}; + +static const struct bpf_func_proto bpf_sched_entity_belongs_to_cgrp_proto = { + .func = bpf_sched_entity_belongs_to_cgrp, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &btf_sched_entity_ids[0], + .arg2_type = ARG_ANYTHING, +}; + +static const struct bpf_func_proto * +bpf_sched_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_trace_printk: + return bpf_get_trace_printk_proto(); + case BPF_FUNC_sched_entity_to_tgidpid: + return &bpf_sched_entity_to_tgidpid_proto; + case BPF_FUNC_sched_entity_to_cgrpid: + return &bpf_sched_entity_to_cgrpid_proto; + case BPF_FUNC_sched_entity_belongs_to_cgrp: + return &bpf_sched_entity_belongs_to_cgrp_proto; + default: + return bpf_base_func_proto(func_id); + } +} + +const struct bpf_prog_ops bpf_sched_prog_ops = { +}; + +const struct bpf_verifier_ops bpf_sched_verifier_ops = { + .get_func_proto = bpf_sched_func_proto, + .is_valid_access = btf_ctx_access, +}; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 50d457979db61fa44f73f64c2a082f30fbeab8ee..522640cd4aa2ace6d18317ab5c9366524b93161d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -28,6 +28,7 @@ #include #include #endif +#include <linux/bpf_sched.h> /* * Targeted preemption latency for CPU-bound tasks: @@ -278,33 +279,11 @@ const struct sched_class fair_sched_class; */ #ifdef CONFIG_FAIR_GROUP_SCHED -static inline struct task_struct *task_of(struct sched_entity *se) -{ - SCHED_WARN_ON(!entity_is_task(se)); - return 
container_of(se, struct task_struct, se); -} /* Walk up scheduling entities hierarchy */ #define for_each_sched_entity(se) \ for (; se; se = se->parent) -static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) -{ - return p->se.cfs_rq; -} - -/* runqueue on which this entity is (to be) queued */ -static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) -{ - return se->cfs_rq; -} - -/* runqueue "owned" by this group */ -static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) -{ - return grp->my_q; -} - static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) { if (!path) @@ -465,33 +444,9 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) #else /* !CONFIG_FAIR_GROUP_SCHED */ -static inline struct task_struct *task_of(struct sched_entity *se) -{ - return container_of(se, struct task_struct, se); -} - #define for_each_sched_entity(se) \ for (; se; se = NULL) -static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) -{ - return &task_rq(p)->cfs; -} - -static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) -{ - struct task_struct *p = task_of(se); - struct rq *rq = task_rq(p); - - return &rq->cfs; -} - -/* runqueue "owned" by this group */ -static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) -{ - return NULL; -} - static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) { if (path) @@ -4498,6 +4453,18 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; + +#ifdef CONFIG_BPF_SCHED + if (bpf_sched_enabled()) { + int ret = bpf_sched_cfs_check_preempt_tick(curr, delta_exec); + + if (ret < 0) + return; + else if (ret > 0) + resched_curr(rq_of(cfs_rq)); + } +#endif + if (delta_exec > ideal_runtime) { resched_curr(rq_of(cfs_rq)); /* @@ -7067,6 +7034,15 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) { s64 gran, vdiff = curr->vruntime - se->vruntime; +#ifdef CONFIG_BPF_SCHED + if (bpf_sched_enabled()) { + int ret = bpf_sched_cfs_wakeup_preempt_entity(curr, se); + + if (ret) + return ret; + } +#endif + if (vdiff <= 0) return -1; @@ -7153,6 +7129,17 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ likely(!task_has_idle_policy(p))) goto preempt; +#ifdef CONFIG_BPF_SCHED + if (bpf_sched_enabled()) { + int ret = bpf_sched_cfs_check_preempt_wakeup(current, p); + + if (ret < 0) + return; + else if (ret > 0) + goto preempt; + } +#endif + /* * Batch and idle tasks do not preempt non-idle tasks (their preemption * is driven by the tick): diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index e41a5207a212edf05ab3804ee1c4c20f2481f58c..35cc92b5eeea74af5e80b3452ed9b93d73466521 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1172,6 +1172,58 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); #define cpu_curr(cpu) (cpu_rq(cpu)->curr) #define raw_rq() raw_cpu_ptr(&runqueues) +#ifdef CONFIG_FAIR_GROUP_SCHED +static inline struct task_struct *task_of(struct sched_entity *se) +{ + SCHED_WARN_ON(!entity_is_task(se)); + return container_of(se, struct task_struct, se); +} + +static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) +{ + return p->se.cfs_rq; +} + +/* runqueue on which this entity is (to be) queued */ +static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) +{ + return se->cfs_rq; +} + +/* runqueue "owned" by this group */ +static inline struct cfs_rq 
*group_cfs_rq(struct sched_entity *grp) +{ + return grp->my_q; +} + +#else + +static inline struct task_struct *task_of(struct sched_entity *se) +{ + return container_of(se, struct task_struct, se); +} + +static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) +{ + return &task_rq(p)->cfs; +} + +static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) +{ + struct task_struct *p = task_of(se); + struct rq *rq = task_rq(p); + + return &rq->cfs; +} + +/* runqueue "owned" by this group */ +static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) +{ + return NULL; +} +#endif + + extern void update_rq_clock(struct rq *rq); static inline u64 __rq_clock_broken(struct rq *rq) diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 31484377b8b11a69c269285f757dd5094b1e46ed..be21512ee7be4098360b51548665730e8aee1e98 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -435,6 +435,7 @@ class PrinterHelpers(Printer): 'struct xdp_md', 'struct path', 'struct btf_ptr', + 'struct sched_entity', ] known_types = { '...', @@ -478,6 +479,7 @@ class PrinterHelpers(Printer): 'struct task_struct', 'struct path', 'struct btf_ptr', + 'struct sched_entity', } mapped_types = { 'u8': '__u8', diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 6ebf2b215ef49698fc4601339099260380e1b126..81def197e774485c7ea7e1f72ff6ca7da560f779 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -66,6 +66,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { [BPF_MODIFY_RETURN] = "mod_ret", [BPF_LSM_MAC] = "lsm_mac", [BPF_SK_LOOKUP] = "sk_lookup", + [BPF_SCHED] = "sched", }; void p_err(const char *fmt, ...) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 592536904dde2d59a152c838a39273111df77495..4e1d8e57d951a31b55624445561d8f9f0e636777 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -64,6 +64,7 @@ const char * const prog_type_name[] = { [BPF_PROG_TYPE_EXT] = "ext", [BPF_PROG_TYPE_LSM] = "lsm", [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup", + [BPF_PROG_TYPE_SCHED] = "sched", }; const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index a44cb51558254c7bc1dcd9f4c55b17f88c177e40..522ace5af96520c91e97faf4e60cb0c0877394c5 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -199,6 +199,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_EXT, BPF_PROG_TYPE_LSM, BPF_PROG_TYPE_SK_LOOKUP, + BPF_PROG_TYPE_SCHED, }; enum bpf_attach_type { @@ -240,6 +241,7 @@ enum bpf_attach_type { BPF_XDP_CPUMAP, BPF_SK_LOOKUP, BPF_XDP, + BPF_SCHED, __MAX_BPF_ATTACH_TYPE }; @@ -3755,6 +3757,26 @@ union bpf_attr { * Get Ipv4 origdst or replysrc. Works with IPv4. * Return * 0 on success, or a negative error in case of failure. + * + * u64 bpf_sched_entity_to_tgidpid(struct sched_entity *se) + * Description + * Return task's encoded tgid and pid if the sched entity is a task. + * Return + * Tgid and pid encoded as tgid << 32 \| pid, if *se* is a task. (u64)-1 otherwise. + * + * u64 bpf_sched_entity_to_cgrpid(struct sched_entity *se) + * Description + * Return cgroup id if the given sched entity is a cgroup. + * Return + * Cgroup id, if *se* is a cgroup. (u64)-1 otherwise. + * + * long bpf_sched_entity_belongs_to_cgrp(struct sched_entity *se, u64 cgrpid) + * Description + * Checks whether the sched entity belongs to a cgroup or + * its sub-tree. 
It doesn't require a cgroup CPU controller + * to be enabled. + * Return + * 1 if the sched entity belongs to the cgroup or its sub-tree, 0 otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3915,6 +3937,9 @@ union bpf_attr { FN(redirect_peer), \ FN(get_sockops_uid_gid), \ FN(sk_original_addr), \ + FN(sched_entity_to_tgidpid), \ + FN(sched_entity_to_cgrpid), \ + FN(sched_entity_belongs_to_cgrp), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index d27e34133973b52e1152c955fbaac831feeeecc0..13e08c0d8b1a3084bb64f4c71c1b78e0d7aadbeb 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -236,7 +236,8 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, attr.prog_type == BPF_PROG_TYPE_LSM) { attr.attach_btf_id = load_attr->attach_btf_id; } else if (attr.prog_type == BPF_PROG_TYPE_TRACING || - attr.prog_type == BPF_PROG_TYPE_EXT) { + attr.prog_type == BPF_PROG_TYPE_EXT || + attr.prog_type == BPF_PROG_TYPE_SCHED) { attr.attach_btf_id = load_attr->attach_btf_id; attr.attach_prog_fd = load_attr->attach_prog_fd; } else { diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index c9f5eef6d3d80502f6ca23e4c6902a448f0c8925..2894d837e9f83f2143b28d67a5ff5b87a711209c 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2504,7 +2504,8 @@ static int bpf_object__finalize_btf(struct bpf_object *obj) static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog) { if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || - prog->type == BPF_PROG_TYPE_LSM) + prog->type == BPF_PROG_TYPE_LSM || + prog->type == BPF_PROG_TYPE_SCHED) return true; /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs @@ -6706,7 +6707,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, prog->type == BPF_PROG_TYPE_LSM) { load_attr.attach_btf_id = prog->attach_btf_id; } else if (prog->type == BPF_PROG_TYPE_TRACING || - prog->type == BPF_PROG_TYPE_EXT) { + prog->type == BPF_PROG_TYPE_EXT || + prog->type == BPF_PROG_TYPE_SCHED) { load_attr.attach_prog_fd = prog->attach_prog_fd; load_attr.attach_btf_id = prog->attach_btf_id; } else { @@ -6813,7 +6815,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) if ((prog->type == BPF_PROG_TYPE_TRACING || prog->type == BPF_PROG_TYPE_LSM || - prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { + prog->type == BPF_PROG_TYPE_EXT || + prog->type == BPF_PROG_TYPE_SCHED) && !prog->attach_btf_id) { btf_id = libbpf_find_attach_btf_id(prog); if (btf_id <= 0) return btf_id; @@ -8238,6 +8241,7 @@ BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING); BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS); BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT); BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP); +BPF_PROG_TYPE_FNS(sched, BPF_PROG_TYPE_SCHED); enum bpf_attach_type bpf_program__get_expected_attach_type(struct bpf_program *prog) @@ -8302,6 +8306,8 @@ static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, struct bpf_program *prog); static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, struct bpf_program *prog); +static struct bpf_link *attach_sched(const struct bpf_sec_def *sec, + struct bpf_program *prog); static const struct bpf_sec_def section_defs[] = { BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), @@ -8370,6 +8376,10 @@ static const struct bpf_sec_def section_defs[] = { .expected_attach_type = BPF_TRACE_ITER, .is_attach_btf = true, .attach_fn = 
attach_iter), + SEC_DEF("sched/", SCHED, + .is_attach_btf = true, + .expected_attach_type = BPF_SCHED, + .attach_fn = attach_sched), BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP, BPF_XDP_DEVMAP), BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP, @@ -8657,6 +8667,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, #define BTF_TRACE_PREFIX "btf_trace_" #define BTF_LSM_PREFIX "bpf_lsm_" #define BTF_ITER_PREFIX "bpf_iter_" +#define BTF_SCHED_PREFIX "bpf_sched_" #define BTF_MAX_NAME_SIZE 128 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, @@ -8690,6 +8701,9 @@ static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name, else if (attach_type == BPF_TRACE_ITER) err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name, BTF_KIND_FUNC); + else if (attach_type == BPF_SCHED) + err = find_btf_by_prefix_kind(btf, BTF_SCHED_PREFIX, name, + BTF_KIND_FUNC); else err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); @@ -9669,6 +9683,11 @@ struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog) return bpf_program__attach_btf_id(prog); } +struct bpf_link *bpf_program__attach_sched(struct bpf_program *prog) +{ + return bpf_program__attach_btf_id(prog); +} + struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog) { return bpf_program__attach_btf_id(prog); @@ -9680,6 +9699,12 @@ static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, return bpf_program__attach_trace(prog); } +static struct bpf_link *attach_sched(const struct bpf_sec_def *sec, + struct bpf_program *prog) +{ + return bpf_program__attach_sched(prog); +} + static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, struct bpf_program *prog) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 57d10b779dea019e05a218a8f0e965b84988088c..a011179f705d0559b6b4a618c009269e03923ddc 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -264,6 +264,8 @@ bpf_program__attach_xdp(struct bpf_program *prog, int ifindex); LIBBPF_API struct bpf_link * bpf_program__attach_freplace(struct bpf_program *prog, int target_fd, const char *attach_func_name); +LIBBPF_API struct bpf_link * +bpf_program__attach_sched(struct bpf_program *prog); struct bpf_map; @@ -360,6 +362,7 @@ LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog); LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog); LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog); LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_sched(struct bpf_program *prog); LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog); LIBBPF_API void bpf_program__set_type(struct bpf_program *prog, @@ -388,6 +391,7 @@ LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_sched(const struct bpf_program *prog); /* * No need for __attribute__((packed)), all members of 'bpf_map_def' diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 
4ebfadf45b47d27a3e66bb23aac6d90767df67ed..16393cea53d016c5ee7d20c342b991d1898050b6 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -336,4 +336,7 @@ LIBBPF_0.2.0 { perf_buffer__epoll_fd; perf_buffer__consume_buffer; xsk_socket__create_shared; + bpf_program__attach_sched; + bpf_program__is_sched; + bpf_program__set_sched; } LIBBPF_0.1.0; diff --git a/tools/testing/selftests/bpf/prog_tests/test_sched.c b/tools/testing/selftests/bpf/prog_tests/test_sched.c new file mode 100644 index 0000000000000000000000000000000000000000..dc1fdbab3ce0a5c7a1805d41d7e141cb6242b1d6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_sched.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei LLC. + */ + +#include <test_progs.h> + +#include "sched.skel.h" +#include "cgroup_helpers.h" + +// #define debug(args...) printf(args) +#define debug(args...) + +#define CHECK_TGIDPID_MODE(HOOKNAME, TGIDPID) \ + do { \ + if (skel->bss->HOOKNAME##_tgidpid_ret) { \ + CHECK(skel->bss->HOOKNAME##_tgidpid_ret != TGIDPID, \ + #HOOKNAME"_tgidpid", #HOOKNAME"_tgidpid_ret %lu\n", \ + skel->bss->HOOKNAME##_tgidpid_ret); \ + } \ + } while (0) + +#define CHECK_CGID_MODE(HOOKNAME, PID, CGID) \ + do { \ + if (skel->bss->HOOKNAME##_cgid_ret) { \ + if (skel->bss->HOOKNAME##_cgid_pid_ret) { \ + CHECK(skel->bss->HOOKNAME##_cgid_pid_ret != PID, \ + #HOOKNAME"_cgid_pid", #HOOKNAME"_cgid_pid_ret %u\n", \ + skel->bss->HOOKNAME##_cgid_pid_ret); \ + } \ + if (skel->bss->HOOKNAME##_cgid_se_to_cgid_ret) { \ + CHECK(skel->bss->HOOKNAME##_cgid_se_to_cgid_ret != CGID, \ + #HOOKNAME"_cgid_se_to_cgid", \ + #HOOKNAME"_cgid_se_to_cgid_ret %lu\n", \ + skel->bss->HOOKNAME##_cgid_se_to_cgid_ret); \ + } \ + } \ + } while (0) + +static void work(void) +{ + int i; + + for (i = 0; i < 1000; i++) + usleep(1000); +} + +int create_prioritize_task(int *child_pid) +{ + int cpid; + + cpid = fork(); + if (cpid == -1) { + return -ECHILD; + } else if (cpid == 0) { + work(); + exit(0); + } else { + *child_pid = cpid; + debug("prioritize task(s) with pid %d\n", *child_pid); + return 0; + } + return -EINVAL; +} + +void test_sched_tgidpid_mode(void) +{ + struct sched *skel = NULL; + int err, duration = 0, child_pid = 0, tgid = 0, cgid = 0; + int status = 0; + + skel = sched__open(); + if (CHECK(!skel, "open", "sched open failed\n")) + goto close_prog; + + err = sched__load(skel); + if (CHECK(err, "load", "sched load failed: %d\n", err)) + goto close_prog; + + err = sched__attach(skel); + if (CHECK(err, "attach", "sched attach failed: %d\n", err)) + goto close_prog; + + err = create_prioritize_task(&child_pid); + if (CHECK(err < 0, "create_prior_task", "err %d errno %d\n", err, errno)) + goto close_prog; + + tgid = child_pid; + debug("prioritize task(s) with tgid %d\n", tgid); + skel->bss->tgidpid = (unsigned long)tgid << 32 | child_pid; + debug("prioritize task(s) with tgidpid %lu\n", skel->bss->tgidpid); + skel->bss->cgid = cgid; + + if (child_pid) + err = waitpid(child_pid, &status, 0); + if (CHECK(err == -1 && errno != ECHILD, "waitpid", "failed %d", errno)) + goto close_prog; + + CHECK_TGIDPID_MODE(tick, skel->bss->tgidpid); + CHECK_TGIDPID_MODE(wakeup, skel->bss->tgidpid); + CHECK_TGIDPID_MODE(entity, skel->bss->tgidpid); + +close_prog: + sched__destroy(skel); +} + +#define TEST_CGROUP "/test-bpf-sched-cgid-mode/" + +void test_sched_cgid_mode(void) +{ + struct sched *skel = NULL; + int err, duration = 0, cgroup_fd = 0, pid = 0; + unsigned long long cgid = 0; + + skel = sched__open(); + if (CHECK(!skel, "open", "sched open failed\n")) + goto 
close_prog; + + err = sched__load(skel); + if (CHECK(err, "load", "sched load failed: %d\n", err)) + goto close_prog; + + err = sched__attach(skel); + if (CHECK(err, "attach", "sched attach failed: %d\n", err)) + goto close_prog; + + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); + if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno)) + goto cleanup_cgroup_env; + + cgid = get_cgroup_id(TEST_CGROUP); + if (CHECK(!cgid, "get_cgroup_id", "err %llu", cgid)) + goto cleanup_cgroup_env; + + skel->bss->tgidpid = 0; + skel->bss->cgid = cgid; + debug("cgid %llu\n", cgid); + + /* trigger sched hook */ + work(); + + pid = getpid(); + + CHECK_CGID_MODE(tick, pid, cgid); + CHECK_CGID_MODE(wakeup, pid, cgid); + CHECK_CGID_MODE(entity, pid, cgid); + +cleanup_cgroup_env: + cleanup_cgroup_environment(); +close_prog: + sched__destroy(skel); +} + +void test_test_sched(void) +{ + if (test__start_subtest("sched_tgidpid_mode")) + test_sched_tgidpid_mode(); + if (test__start_subtest("sched_cgid_mode")) + test_sched_cgid_mode(); +} diff --git a/tools/testing/selftests/bpf/progs/sched.c b/tools/testing/selftests/bpf/progs/sched.c new file mode 100644 index 0000000000000000000000000000000000000000..23b057b1d155bfa77498abef8a4e0872bdb9cb83 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/sched.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2022 Huawei LLC. + */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> + +#ifndef NULL +#define NULL 0 +#endif + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} array SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} hash SEC(".maps"); + +char _license[] SEC("license") = "GPL"; + +unsigned long tgidpid; +unsigned long cgid; + +unsigned long tick_tgidpid_ret; +unsigned int tick_cgid_ret; +unsigned int tick_cgid_pid_ret; +unsigned long tick_cgid_se_to_cgid_ret; + +unsigned long wakeup_tgidpid_ret; +unsigned int wakeup_cgid_ret; +unsigned int wakeup_cgid_pid_ret; +unsigned long wakeup_cgid_se_to_cgid_ret; + +unsigned long entity_tgidpid_ret; +unsigned int entity_cgid_ret; +unsigned int entity_cgid_pid_ret; +unsigned long entity_cgid_se_to_cgid_ret; + +// #define debug(args...) bpf_printk(args) +#define debug(args...) 
+ +SEC("sched/cfs_check_preempt_tick") +int BPF_PROG(test_check_preempt_tick, struct sched_entity *curr, unsigned long delta_exec) +{ + if (curr == NULL) + return 0; + + if (tgidpid) { + unsigned long curr_tgidpid; + + curr_tgidpid = bpf_sched_entity_to_tgidpid(curr); + + if (curr_tgidpid == tgidpid) { + tick_tgidpid_ret = curr_tgidpid; + debug("tick tgid %d pid %d tick_tgidpid_ret %lu", curr_tgidpid >> 32, + curr_tgidpid & 0xFFFFFFFF, tick_tgidpid_ret); + } + } else if (cgid) { + if (bpf_sched_entity_belongs_to_cgrp(curr, cgid)) { + unsigned long curr_tgidpid; + + tick_cgid_ret = 1; + + if (!curr->my_q) { + curr_tgidpid = bpf_sched_entity_to_tgidpid(curr); + tick_cgid_pid_ret = curr_tgidpid & 0xFFFFFFFF; + debug("tick tick_cgid_ret %d curr_pid %d", tick_cgid_ret, + tick_cgid_pid_ret); + } + + if (curr->my_q) { + tick_cgid_se_to_cgid_ret = bpf_sched_entity_to_cgrpid(curr); + debug("tick tick_cgid_ret %d curr_se_to_cgid %d", tick_cgid_ret, + tick_cgid_se_to_cgid_ret); + } + + } + } + return 0; +} + +SEC("sched/cfs_check_preempt_wakeup") +int BPF_PROG(test_check_preempt_wakeup, struct task_struct *curr, struct task_struct *p) +{ + __u64 *value = NULL; + __u32 key = 0; + + if (curr == NULL || p == NULL) + return 0; + + value = bpf_map_lookup_elem(&array, &key); + if (value) + *value = 0; + value = bpf_map_lookup_elem(&hash, &key); + if (value) + *value = 0; + + if (tgidpid) { + unsigned long curr_tgidpid, p_tgidpid; + + curr_tgidpid = bpf_sched_entity_to_tgidpid(&curr->se); + p_tgidpid = bpf_sched_entity_to_tgidpid(&p->se); + + if (curr_tgidpid == tgidpid) { + wakeup_tgidpid_ret = curr_tgidpid; + + debug("wakeup curr_tgid %d curr_pid %d wakeup_tgidpid_ret %lu", + curr_tgidpid >> 32, curr_tgidpid & 0xFFFFFFFF, wakeup_tgidpid_ret); + } else if (p_tgidpid == tgidpid) { + wakeup_tgidpid_ret = p_tgidpid; + + debug("wakeup p_tgid %d p_pid %d wakeup_tgidpid_ret %lu", p_tgidpid >> 32, + p_tgidpid & 0xFFFFFFFF, wakeup_tgidpid_ret); + } + } else if (cgid) { + if (bpf_sched_entity_belongs_to_cgrp(&curr->se, cgid)) { + wakeup_cgid_ret = 1; + wakeup_cgid_pid_ret = curr->pid; + + debug("wakeup wakeup_cgid_ret %d curr_pid %d", wakeup_cgid_ret, + wakeup_cgid_pid_ret); + } else if (bpf_sched_entity_belongs_to_cgrp(&p->se, cgid)) { + wakeup_cgid_ret = 1; + wakeup_cgid_pid_ret = p->pid; + + debug("wakeup wakeup_cgid_ret %d p_pid %d", wakeup_cgid_ret, + wakeup_cgid_pid_ret); + } + } + return 0; +} + +SEC("sched/cfs_wakeup_preempt_entity") +int BPF_PROG(test_wakeup_preempt_entity, struct sched_entity *curr, struct sched_entity *se) +{ + if (curr == NULL || se == NULL) + return 0; + + if (tgidpid) { + unsigned long curr_tgidpid, se_tgidpid; + + curr_tgidpid = bpf_sched_entity_to_tgidpid(curr); + se_tgidpid = bpf_sched_entity_to_tgidpid(se); + + if (curr_tgidpid == tgidpid) { + entity_tgidpid_ret = curr_tgidpid; + debug("entity curr_tgid %d curr_pid %d entity_tgidpid_ret %lu", + curr_tgidpid >> 32, curr_tgidpid & 0xFFFFFFFF, entity_tgidpid_ret); + } else if (se_tgidpid == tgidpid) { + entity_tgidpid_ret = se_tgidpid; + debug("entity se_tgid %d se_pid %d entity_tgidpid_ret %lu", + se_tgidpid >> 32, se_tgidpid & 0xFFFFFFFF, entity_tgidpid_ret); + } + } else if (cgid) { + if (bpf_sched_entity_belongs_to_cgrp(curr, cgid)) { + unsigned long curr_tgidpid; + + entity_cgid_ret = 1; + + if (!curr->my_q) { + curr_tgidpid = bpf_sched_entity_to_tgidpid(curr); + entity_cgid_pid_ret = curr_tgidpid & 0xFFFFFFFF; + debug("entity entity_cgid_ret %d curr_pid %d", + entity_cgid_ret, entity_cgid_pid_ret); + } + + if (curr->my_q) { + 
entity_cgid_se_to_cgid_ret = bpf_sched_entity_to_cgrpid(curr); + debug("entity entity_cgid_ret %d curr_se_to_cgid %d", + entity_cgid_ret, entity_cgid_se_to_cgid_ret); + } + } else if (bpf_sched_entity_belongs_to_cgrp(se, cgid)) { + unsigned long se_tgidpid; + + entity_cgid_ret = 1; + + if (!se->my_q) { + se_tgidpid = bpf_sched_entity_to_tgidpid(se); + entity_cgid_pid_ret = se_tgidpid & 0xFFFFFFFF; + debug("entity entity_cgid_ret %d se_pid %d", entity_cgid_ret, + entity_cgid_pid_ret); + } + + if (se->my_q) { + entity_cgid_se_to_cgid_ret = bpf_sched_entity_to_cgrpid(se); + debug("entity entity_cgid_ret %d se_se_to_cgid %d", entity_cgid_ret, + entity_cgid_se_to_cgid_ret); + } + } + } + return 0; +}
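Usage sketch (not part of the patch): the pieces above combine as follows. A scheduler program attaches via BTF to one of the bpf_sched_* nop hooks, must carry a GPL-compatible license (enforced by bpf_sched_verify_prog()), and its return value steers the fair-class decision; per the fair.c wiring, a positive return from the cfs_check_preempt_wakeup hook forces preemption, a negative return suppresses it, and zero falls back to the default CFS logic. The names boost.bpf.c, boost.skel.h, boost_wakeup and boosted_tgidpid below are hypothetical; the bpf_sched_entity_to_tgidpid() declaration is assumed to come from a bpf_helper_defs.h regenerated with the scripts/bpf_helpers_doc.py change.

// SPDX-License-Identifier: GPL-2.0
/* boost.bpf.c -- minimal sketch of a BPF_PROG_TYPE_SCHED program */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";	/* sched programs must be GPL-compatible */

unsigned long boosted_tgidpid;	/* tgid << 32 | pid, filled in by userspace */

SEC("sched/cfs_check_preempt_wakeup")
int BPF_PROG(boost_wakeup, struct task_struct *curr, struct task_struct *p)
{
	/* Preempt right away when the boosted task wakes up. */
	if (bpf_sched_entity_to_tgidpid(&p->se) == boosted_tgidpid)
		return 1;
	/* Never preempt the boosted task on an ordinary wakeup. */
	if (bpf_sched_entity_to_tgidpid(&curr->se) == boosted_tgidpid)
		return -1;
	return 0;	/* defer to the default CFS decision */
}

Attachment from userspace goes through the new bpf_program__attach_sched(); a skeleton's attach call does this automatically because the "sched/" SEC_DEF above sets .attach_fn = attach_sched, and attaching flips the bpf_sched_enabled static key, so the hooks cost nothing while no program is attached:

/* boost.c -- loader sketch, assuming a skeleton generated from boost.bpf.c */
#include <stdlib.h>
#include <unistd.h>
#include "boost.skel.h"

int main(int argc, char **argv)
{
	struct boost *skel;
	unsigned long pid;
	int err;

	if (argc != 2)
		return 1;
	pid = strtoul(argv[1], NULL, 0);

	skel = boost__open_and_load();
	if (!skel)
		return 1;
	/* Single-threaded target, so tgid == pid in the helper's encoding. */
	skel->bss->boosted_tgidpid = pid << 32 | pid;

	err = boost__attach(skel);	/* uses bpf_program__attach_sched() */
	if (err)
		goto out;
	pause();	/* the policy stays active while the link is held */
out:
	boost__destroy(skel);
	return err ? 1 : 0;
}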