diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index ee4fe8cdb857676f3fb69cd14a17aed3c735f8f1..b49ac8ecbbd691d62c0bbda9f81a1e398916e83b 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -122,6 +122,7 @@ int sched_set_itmt_support(void)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(sched_set_itmt_support);
 
 /**
  * sched_clear_itmt_support() - Revoke platform's support of ITMT
@@ -181,3 +182,4 @@ void sched_set_itmt_core_prio(int prio, int cpu)
 {
 	per_cpu(sched_core_priority, cpu) = prio;
 }
+EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 37f1cdf46d291800c6ff5487a96abacebb8dc43f..a209b027a897d8996f0e6b1637ea4a488dba8419 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -628,28 +628,35 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 #endif
 
 #ifdef CONFIG_ACPI_CPPC_LIB
-static u64 get_max_boost_ratio(unsigned int cpu)
+static bool cppc_highest_perf_diff;
+static struct cpumask core_prior_mask;
+
+static void cppc_get_highest_nominal_perf(int cpu, u64 *highest_perf, u64 *nominal_perf)
 {
 	struct cppc_perf_caps perf_caps;
-	u64 highest_perf, nominal_perf;
 	int ret;
 
-	if (acpi_pstate_strict)
-		return 0;
-
 	ret = cppc_get_perf_caps(cpu, &perf_caps);
 	if (ret) {
-		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
-			 cpu, ret);
-		return 0;
+		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
+		return;
 	}
-
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		highest_perf = amd_get_highest_perf();
+		*highest_perf = amd_get_highest_perf();
 	else
-		highest_perf = perf_caps.highest_perf;
+		*highest_perf = perf_caps.highest_perf;
 
-	nominal_perf = perf_caps.nominal_perf;
+	*nominal_perf = perf_caps.nominal_perf;
+}
+
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+	u64 highest_perf, nominal_perf;
+
+	if (acpi_pstate_strict)
+		return 0;
+
+	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
 
 	if (!highest_perf || !nominal_perf) {
 		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
@@ -663,8 +670,51 @@ static u64 get_max_boost_ratio(unsigned int cpu)
 
 	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
 }
+
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void cpufreq_sched_itmt_work_fn(struct work_struct *work)
+{
+	sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, cpufreq_sched_itmt_work_fn);
+
+static void cpufreq_set_itmt_prio(int cpu)
+{
+	u64 highest_perf, nominal_perf;
+	static u64 max_highest_perf = 0, min_highest_perf = U64_MAX;
+
+	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
+
+	sched_set_itmt_core_prio(highest_perf, cpu);
+	cpumask_set_cpu(cpu, &core_prior_mask);
+
+	if (max_highest_perf <= min_highest_perf) {
+		if (highest_perf > max_highest_perf)
+			max_highest_perf = highest_perf;
+
+		if (highest_perf < min_highest_perf)
+			min_highest_perf = highest_perf;
+
+		if (max_highest_perf > min_highest_perf) {
+			/*
+			 * This code can be run during CPU online under the
+			 * CPU hotplug locks, so sched_set_itmt_support()
+			 * cannot be called from here. Queue up a work item
+			 * to invoke it.
+			 */
+			cppc_highest_perf_diff = true;
+		}
+	}
+
+	if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) {
+		pr_debug("queue work to enable ITMT\n");
+		schedule_work(&sched_itmt_work);
+	}
+}
 #else
 static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static void cpufreq_set_itmt_prio(int cpu) { }
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -677,7 +727,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	unsigned int valid_states = 0;
 	unsigned int result = 0;
 	u64 max_boost_ratio;
-	unsigned int i;
+	unsigned int i, j;
 #ifdef CONFIG_SMP
 	static int blacklisted;
 #endif
@@ -741,6 +791,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 			pr_info_once("overriding BIOS provided _PSD data\n");
 	}
 #endif
+	if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) {
+		for_each_cpu(j, policy->cpus) {
+			cpufreq_set_itmt_prio(j);
+		}
+	}
 
 	/* capability check */
 	if (perf->state_count <= 1) {
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 423d08947962c8acd7ac304e6a7a995fb6a58d5f..b7778c39780b992e49d1f61e7551d4ecffdecc94 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2469,6 +2469,17 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 		}
 	}
 
+#if IS_ENABLED(CONFIG_X86)
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) &&
+	    (boot_cpu_data.x86 == 7 && boot_cpu_data.x86_model == 0x5b)) {
+		for_each_cpu(i, cpu_map) {
+			for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent)
+				sd->flags |= SD_ASYM_PACKING;
+		}
+	}
+#endif
+
 	/* Calculate CPU capacity for physical packages and nodes */
 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
 		if (!cpumask_test_cpu(i, cpu_map))
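
The two ITMT entry points exported above are meant to be driven in the
same pattern this patch adds to acpi-cpufreq, and that intel_pstate
already uses: record a per-CPU priority with sched_set_itmt_core_prio()
as CPUs come online, and defer the one-shot sched_set_itmt_support()
call to process context through a work item, because enabling ITMT
rebuilds the scheduler domains and cannot be done while the CPU hotplug
locks are held. A minimal consumer sketch, assuming an x86 kernel with
CONFIG_SCHED_MC_PRIO; the driver function names and the
my_read_core_perf() helper are hypothetical, not part of this patch:

#include <linux/workqueue.h>
#include <asm/topology.h>	/* sched_set_itmt_support(), sched_set_itmt_core_prio() */

/* Runs in process context, outside the CPU hotplug locks. */
static void my_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}
static DECLARE_WORK(my_itmt_work, my_itmt_work_fn);

static void my_driver_cpu_online(int cpu)
{
	int prio = my_read_core_perf(cpu);	/* hypothetical helper */

	/* Cheap, and safe to call from the CPU online path. */
	sched_set_itmt_core_prio(prio, cpu);

	/*
	 * Enabling ITMT rebuilds the sched domains, which needs locks
	 * that may already be held on the CPU online path, so defer
	 * the call to a work item instead of invoking it here.
	 */
	schedule_work(&my_itmt_work);
}

Re-queueing a work item that is still pending is a no-op, so repeated
calls from several CPUs coming online collapse into a single
sched_set_itmt_support() run.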