From: Tim Chen <tim.c.chen@linux.intel.com>
Subject: [Patch v4 08/18] sched: Deprecate sched_smt_present and use cpu_smt_enabled static key
The cpu_smt_enabled static key serves the same purpose as sched_smt_present:
gating SMT-specific code.

This patch replaces sched_smt_present in the scheduler with cpu_smt_enabled
and deprecates sched_smt_present.
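
For illustration, a minimal sketch of the static-key pattern the scheduler
switches to. This is not part of the patch: the function name is made up,
and the DECLARE_STATIC_KEY_TRUE() default is an assumption here (the real
cpu_smt_enabled key is provided by the cpu hotplug code elsewhere in this
series, not defined in scheduler code):

	#include <linux/jump_label.h>

	/*
	 * Assumed declaration for this sketch; the real key lives in
	 * the cpu hotplug code.
	 */
	DECLARE_STATIC_KEY_TRUE(cpu_smt_enabled);

	static int smt_only_work(void)
	{
		/*
		 * Compiled as a runtime-patched jump, so the check is
		 * near-free on the hot path when SMT is disabled.
		 */
		if (!static_branch_likely(&cpu_smt_enabled))
			return -1;

		/* ... SMT-specific work ... */
		return 0;
	}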

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
 kernel/sched/core.c  | 12 ------------
 kernel/sched/fair.c  |  6 ++----
 kernel/sched/sched.h |  4 +---
 3 files changed, 3 insertions(+), 19 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 625bc98..a06b157 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5739,18 +5739,6 @@ int sched_cpu_activate(unsigned int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	struct rq_flags rf;
 
-#ifdef CONFIG_SCHED_SMT
-	/*
-	 * The sched_smt_present static key needs to be evaluated on every
-	 * hotplug event because at boot time SMT might be disabled when
-	 * the number of booted CPUs is limited.
-	 *
-	 * If then later a sibling gets hotplugged, then the key would stay
-	 * off and SMT scheduling would never be functional.
-	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
-		static_branch_enable_cpuslocked(&sched_smt_present);
-#endif
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b39fb59..4899bb1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5839,8 +5839,6 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 }
 
 #ifdef CONFIG_SCHED_SMT
-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-
 static inline void set_idle_cores(int cpu, int val)
 {
 	struct sched_domain_shared *sds;
@@ -5900,7 +5898,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int core, cpu;
 
-	if (!static_branch_likely(&sched_smt_present))
+	if (!static_branch_likely(&cpu_smt_enabled))
 		return -1;
 
 	if (!test_idle_cores(target, false))
@@ -5936,7 +5934,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 {
 	int cpu;
 
-	if (!static_branch_likely(&sched_smt_present))
+	if (!static_branch_likely(&cpu_smt_enabled))
 		return -1;
 
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4a2e8ca..5adba57 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -928,13 +928,11 @@ static inline int cpu_of(struct rq *rq)

 #ifdef CONFIG_SCHED_SMT
 
-extern struct static_key_false sched_smt_present;
-
 extern void __update_idle_core(struct rq *rq);
 
 static inline void update_idle_core(struct rq *rq)
 {
-	if (static_branch_unlikely(&sched_smt_present))
+	if (static_branch_likely(&cpu_smt_enabled))
 		__update_idle_core(rq);
 }

--
2.9.4