From: Viresh Kumar <>
Subject: [RFC 1/3] arm64: topology: Add amu_counters_supported() helper
Date: Thu, 9 Jul 2020 15:43:33 +0530
We need to know earlier in the boot cycle whether AMUs are supported on all the CPUs. Export a routine for that, and move some code around to make it more readable.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 arch/arm64/kernel/topology.c | 108 ++++++++++++++++++-----------------
 1 file changed, 56 insertions(+), 52 deletions(-)
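For context, a minimal sketch of how a later patch in the series might consume the new helper, assuming the declaration is made available through an appropriate header (not part of this patch; my_driver_init() and the header location are hypothetical). The helper only gives a meaningful answer once early_init_amu_fie() below has run, i.e. from core_initcall_sync time onwards:

/*
 * Hypothetical caller, for illustration only: gate an AMU-based
 * feature on all present CPUs having usable counters.
 */
#include <linux/arch_topology.h>	/* assumed home of the declaration */
#include <linux/errno.h>
#include <linux/init.h>

static int __init my_driver_init(void)
{
	/* Bail out if any present CPU lacks working AMU counters. */
	if (!amu_counters_supported())
		return -ENODEV;

	/* Safe to rely on AMU core/const cycle counters from here on. */
	return 0;
}
late_initcall(my_driver_init);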
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index b7da372819fc..74fde35b56ef 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -130,6 +130,9 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
 static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
 static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
 static cpumask_var_t amu_fie_cpus;
+static cpumask_var_t valid_cpus;
+static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
+#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
 
 /* Initialize counter reference per-cpu variables for the current CPU */
 void init_cpu_freq_invariance_counters(void)
@@ -140,26 +143,14 @@ void init_cpu_freq_invariance_counters(void)
 		      read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
 }
 
-static int validate_cpu_freq_invariance_counters(int cpu)
+static void setup_freq_invariance(int cpu)
 {
-	u64 max_freq_hz, ratio;
-
-	if (!cpu_has_amu_feat(cpu)) {
-		pr_debug("CPU%d: counters are not supported.\n", cpu);
-		return -EINVAL;
-	}
-
-	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
-		     !per_cpu(arch_core_cycles_prev, cpu))) {
-		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
-		return -EINVAL;
-	}
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	u64 ratio;
 
-	/* Convert maximum frequency from KHz to Hz and validate */
-	max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
-	if (unlikely(!max_freq_hz)) {
-		pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
-		return -EINVAL;
+	if (!policy) {
+		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
+		return;
 	}
 
 	/*
@@ -176,69 +167,75 @@ static int validate_cpu_freq_invariance_counters(int cpu)
 	 * be unlikely).
 	 */
 	ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
-	ratio = div64_u64(ratio, max_freq_hz);
+	ratio = div64_u64(ratio, policy->cpuinfo.max_freq * 1000);
 	if (!ratio) {
 		WARN_ONCE(1, "System timer frequency too low.\n");
-		return -EINVAL;
+		goto out;
 	}
 
 	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;
 
-	return 0;
-}
-
-static inline void update_amu_fie_cpus(int cpu, cpumask_var_t valid_cpus)
-{
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
-	if (!policy) {
-		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
-		return;
-	}
-
 	if (cpumask_subset(policy->related_cpus, valid_cpus))
-		cpumask_or(amu_fie_cpus, policy->related_cpus,
-			   amu_fie_cpus);
+		cpumask_or(amu_fie_cpus, policy->related_cpus, amu_fie_cpus);
 
+out:
 	cpufreq_cpu_put(policy);
 }
 
-static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
-#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
+bool amu_counters_supported(void)
+{
+	return likely(cpumask_available(valid_cpus)) &&
+	       cpumask_equal(valid_cpus, cpu_present_mask);
+}
 
-static int __init init_amu_fie(void)
+static int __init early_init_amu_fie(void)
 {
-	cpumask_var_t valid_cpus;
-	int ret = 0;
 	int cpu;
 
 	if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
-	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto free_valid_mask;
-	}
-
 	for_each_present_cpu(cpu) {
-		if (validate_cpu_freq_invariance_counters(cpu))
+		if (!cpu_has_amu_feat(cpu)) {
+			pr_debug("CPU%d: counters are not supported.\n", cpu);
+			continue;
+		}
+
+		if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
+			     !per_cpu(arch_core_cycles_prev, cpu))) {
+			pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
 			continue;
+		}
+
 		cpumask_set_cpu(cpu, valid_cpus);
-		update_amu_fie_cpus(cpu, valid_cpus);
 	}
 
+	return 0;
+}
+core_initcall_sync(early_init_amu_fie);
+
+static int __init late_init_amu_fie(void)
+{
+	int cpu;
+
+	if (!cpumask_available(valid_cpus))
+		return -ENOMEM;
+
+	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
+	for_each_present_cpu(cpu)
+		setup_freq_invariance(cpu);
+
 	if (!cpumask_empty(amu_fie_cpus)) {
 		pr_info("CPUs[%*pbl]: counters will be used for FIE.",
 			cpumask_pr_args(amu_fie_cpus));
 		static_branch_enable(&amu_fie_key);
 	}
 
-free_valid_mask:
-	free_cpumask_var(valid_cpus);
-
-	return ret;
+	return 0;
 }
-late_initcall_sync(init_amu_fie);
+late_initcall_sync(late_init_amu_fie);
 
 bool arch_freq_counters_available(struct cpumask *cpus)
 {
@@ -272,7 +269,7 @@ void topology_scale_freq_tick(void)
 	 *	    scale = ------- * --------------------
 	 *		    /\const   SCHED_CAPACITY_SCALE
 	 *
-	 * See validate_cpu_freq_invariance_counters() for details on
+	 * See setup_freq_invariance() for details on
 	 * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
 	 */
 	scale = core_cnt - prev_core_cnt;
@@ -287,4 +284,11 @@ void topology_scale_freq_tick(void)
 	this_cpu_write(arch_core_cycles_prev, core_cnt);
 	this_cpu_write(arch_const_cycles_prev, const_cnt);
 }
+#else
+bool amu_counters_supported(void)
+{
+	return false;
+}
 #endif /* CONFIG_ARM64_AMU_EXTN */
+
+EXPORT_SYMBOL_GPL(amu_counters_supported);
--
2.25.0.rc1.19.g042ed3e048af
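To make the fixed-point math in setup_freq_invariance() concrete, here is a standalone userspace sketch (not kernel code) of the arch_max_freq_scale computation, using made-up rates: a 25 MHz system counter and a 2 GHz maximum CPU frequency. It also shows why the WARN_ONCE() fires only for an absurdly slow timer:

/*
 * Userspace-only illustration of:
 *   ratio = (arch_timer_rate << 2*SCHED_CAPACITY_SHIFT) / max_freq_hz
 */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* matches the kernel's value */

int main(void)
{
	uint64_t timer_rate = 25000000ULL;	/* arch_timer_get_rate(): 25 MHz */
	uint64_t max_freq_hz = 2000000000ULL;	/* policy->cpuinfo.max_freq * 1000 */

	/* Keep 2*SCHED_CAPACITY_SHIFT bits of fixed-point precision. */
	uint64_t ratio = (timer_rate << (2 * SCHED_CAPACITY_SHIFT)) / max_freq_hz;

	/* Prints 13107: the per-cpu arch_max_freq_scale for these rates. */
	printf("arch_max_freq_scale = %llu\n", (unsigned long long)ratio);

	/*
	 * ratio is 0 only when timer_rate < max_freq_hz >> 20, i.e. a
	 * system counter below ~1.9 kHz here, hence "frequency too low".
	 */
	return ratio ? 0 : 1;
}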