From:	David Carrillo-Cisneros <davidcc@google.com>
Subject:	[PATCH v3 27/46] perf/x86/intel: add pqr cache flags and intel_pqr_ctx_switch
Date:	Sat, 29 Oct 2016 17:38:24 -0700
In order to support intel_cmt for cgroups with no perf_event, this driver calls intel_pqr_ctx_switch during a task context switch, after perf_event has added/deleted all events (if any), using the newly introduced finish_arch_pre_lock_switch() hook, as sketched below.
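Condensed from the processor.h and intel_rdt_common.h hunks below, the hook wiring reduces to the following; the static key keeps the context switch path a no-op until intel_cmt_init() enables it:

	/* processor.h: resolve the scheduler hook to the PQR update. */
	#define finish_arch_pre_lock_switch intel_pqr_ctx_switch

	/* intel_rdt_common.h: no-op unless the driver enabled the key. */
	static inline void intel_pqr_ctx_switch(void)
	{
		if (static_branch_unlikely(&pqr_common_enable_key))
			__pqr_ctx_switch();
	}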
If pmu->add sets an rmid, the next rmid is "marked" using the flag PQR_RMID_FLAG_EVENT. Removing an event sets the root rmid without the flag PQR_RMID_FLAG_EVENT, to indicate that it can be overwritten by an actively monitored cgroup.
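Condensed from the cmt.c hunks below, the two paths look like:

	/* pmu->add/start path: mark the next rmid as set by a perf_event. */
	pqr_cache_update_rmid(rmids.sched_rmid,
			      PQR_RMID_FLAG_EVENT | (wt ? PQR_FLAG_WT : 0));

	/* pmu->stop/del path: clear the flag so a cgroup rmid may overwrite it. */
	pqr_cache_update_rmid(rmids.sched_rmid, 0);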
If no event added to the pmu writes an rmid into the PQR MSR (i.e. PQR_RMID_FLAG_EVENT is not set), then __pqr_ctx_switch calls __intel_cmt_no_event_sched_in to update the PQR cache with the rmid of the active perf cgroup's monr.
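That decision is the core of __pqr_ctx_switch (condensed from the intel_rdt_common.c hunk below):

	/* No perf_event set a rmid; use the current task's cgroup monr. */
	if (likely(!(state->next_rmid_flags & PQR_RMID_FLAG_EVENT)))
		__intel_cmt_no_event_sched_in();

	state->next_rmid_flags = 0;

	/* Write the MSR only if (rmid, closid) actually changed. */
	if (state->rmid != state->next_rmid ||
	    state->closid != state->next_closid) {
		state->rmid = state->next_rmid;
		state->closid = state->next_closid;
		wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->closid);
	}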
In some cases, a change to the PQR MSR software cache must be written through immediately, using the flag PQR_FLAG_WT. These cases are: 1) updating PQR_ASSOC when a CPU goes offline, and 2) the first execution slot of a newly exec'd task, since a newly exec'd task runs before its first context switch.
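In pqr_cache_update_rmid (see the intel_rdt_common.h hunk below), PQR_FLAG_WT short-circuits the lazy path and hits the MSR at once:

	state->next_rmid = rmid;
	state->next_rmid_flags |= next_rmid_flags;

	if (next_rmid_flags & PQR_FLAG_WT) {
		/* Write through now; don't wait for the next ctx switch. */
		state->rmid = rmid;
		wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->closid);
		state->next_rmid_flags &= ~PQR_FLAG_WT;
	}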
Signed-off-by: David Carrillo-Cisneros <davidcc@google.com>
---
 arch/x86/events/intel/cmt.c             | 39 ++++++++++++++++++++++++++++-----
 arch/x86/include/asm/intel_rdt_common.h | 38 ++++++++++++++++++++++++++++----
 arch/x86/include/asm/processor.h        |  4 ++++
 arch/x86/kernel/cpu/intel_rdt_common.c  | 29 ++++++++++++++++++++++++
 4 files changed, 100 insertions(+), 10 deletions(-)
diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 830ce29..bd903ae 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -1323,12 +1323,13 @@ static void intel_cmt_event_read(struct perf_event *event)
 }
 
 static inline void __intel_cmt_event_start(struct perf_event *event,
-					   union pmonr_rmids rmids)
+					   union pmonr_rmids rmids, bool wt)
 {
 	if (!(event->hw.state & PERF_HES_STOPPED))
 		return;
 	event->hw.state &= ~PERF_HES_STOPPED;
-	pqr_cache_update_rmid(rmids.sched_rmid);
+	pqr_cache_update_rmid(rmids.sched_rmid,
+			      PQR_RMID_FLAG_EVENT | (wt ? PQR_FLAG_WT : 0));
 }
 
 static void intel_cmt_event_start(struct perf_event *event, int mode)
@@ -1336,7 +1337,7 @@ static void intel_cmt_event_start(struct perf_event *event, int mode)
 	union pmonr_rmids rmids;
 
 	rmids = monr_get_sched_in_rmids(monr_from_event(event));
-	__intel_cmt_event_start(event, rmids);
+	__intel_cmt_event_start(event, rmids, false);
 }
 
 static void intel_cmt_event_stop(struct perf_event *event, int mode)
@@ -1352,18 +1353,25 @@ static void intel_cmt_event_stop(struct perf_event *event, int mode)
 	 * reads occur even if event is Inactive. Therefore there is no need to
 	 * read when event is stopped.
 	 */
-	pqr_cache_update_rmid(rmids.sched_rmid);
+	pqr_cache_update_rmid(rmids.sched_rmid, 0);
 }
 
 static int intel_cmt_event_add(struct perf_event *event, int mode)
 {
 	union pmonr_rmids rmids;
+	bool wt;
 
 	event->hw.state = PERF_HES_STOPPED;
 	rmids = monr_get_sched_in_rmids(monr_from_event(event));
 
+	/*
+	 * A newly exec'd task may run from the file loader without a context
+	 * switch, if so, the PQR sw cache will not update the rmid in hw.
+	 * Avoid that by issuing a write-through mode to the PQR sw cache.
+	 */
+	wt = event->total_time_running == 0;
+
 	if (mode & PERF_EF_START)
-		__intel_cmt_event_start(event, rmids);
+		__intel_cmt_event_start(event, rmids, wt);
 
 	return 0;
 }
@@ -1697,7 +1705,7 @@ static int intel_cmt_hp_online_exit(unsigned int cpu)
 	struct pkg_data *pkgd;
 	u16 pkgid = topology_logical_package_id(cpu);
 
-	pqr_cache_update_rmid(0);
+	pqr_cache_update_rmid(0, PQR_FLAG_WT);
 	memset(state, 0, sizeof(*state));
 
 	rcu_read_lock();
@@ -1932,6 +1940,8 @@ static int __init intel_cmt_init(void)
 	rcu_read_unlock();
 	pr_cont("and l3 scale of %d KBs.\n", cmt_l3_scale);
 
+	static_branch_inc(&pqr_common_enable_key);
+
 	return err;
 
 err_stop:
@@ -1943,4 +1953,21 @@ static int __init intel_cmt_init(void)
 	return err;
 }
 
+/* Schedule task without a CMT perf_event. */
+inline void __intel_cmt_no_event_sched_in(void)
+{
+#ifdef CONFIG_CGROUP_PERF
+	struct monr *monr;
+	union pmonr_rmids rmids;
+
+	/* Assume CMT enabled is likely given that PQR is enabled. */
+	if (!static_branch_likely(&cmt_initialized_key))
+		return;
+	/* Safe to call from_task since we are in scheduler lock. */
+	monr = monr_from_perf_cgroup(perf_cgroup_from_task(current, NULL));
+	rmids = monr_get_sched_in_rmids(monr);
+	pqr_cache_update_rmid(rmids.sched_rmid, 0);
+#endif
+}
+
 device_initcall(intel_cmt_init);
diff --git a/arch/x86/include/asm/intel_rdt_common.h b/arch/x86/include/asm/intel_rdt_common.h
index 1d5e691..d8d0dc3 100644
--- a/arch/x86/include/asm/intel_rdt_common.h
+++ b/arch/x86/include/asm/intel_rdt_common.h
@@ -3,6 +3,7 @@
 
 #if defined(CONFIG_INTEL_RDT_A) || defined(CONFIG_INTEL_RDT_M)
 
+#include <linux/jump_label.h>
 #include <linux/types.h>
 #include <asm/percpu.h>
 #include <asm/msr.h>
@@ -10,35 +11,49 @@
 
 #define MSR_IA32_PQR_ASSOC	0x0c8f
 
+extern struct static_key_false pqr_common_enable_key;
+
 /**
  * struct intel_pqr_state - State cache for the PQR MSR
  * @rmid:		The cached Resource Monitoring ID
  * @next_rmid:		Next rmid to write to hw
  * @closid:		The cached Class Of Service ID
  * @next_closid:	Next closid to write to hw
+ * @next_rmid_flags:	Next rmid's flags
  *
  * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
  * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
  * contains both parts, so we need to cache them.
  *
  * The cache also helps to avoid pointless updates if the value does
- * not change.
+ * not change. It also keeps track of the type of RMID set (event vs
+ * no event) used to determine when a cgroup RMID is required.
  */
 struct intel_pqr_state {
 	u32			rmid;
 	u32			next_rmid;
 	u32			closid;
 	u32			next_closid;
+	int			next_rmid_flags;
 };
 
+#define PQR_FLAG_WT		BIT(0)	/* write-through. */
+#define PQR_RMID_FLAG_EVENT	BIT(1)	/* associated to a perf_event. */
+
 DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
 
-static inline void pqr_cache_update_rmid(u32 rmid)
+static inline void pqr_cache_update_rmid(u32 rmid, int next_rmid_flags)
 {
 	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
 
 	state->next_rmid = rmid;
-	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->closid);
+	state->next_rmid_flags |= next_rmid_flags;
+
+	if (next_rmid_flags & PQR_FLAG_WT) {
+		state->rmid = rmid;
+		wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->closid);
+		state->next_rmid_flags &= ~PQR_FLAG_WT;
+	}
 }
 
 static inline void pqr_cache_update_closid(u32 closid)
@@ -46,7 +61,22 @@ static inline void pqr_cache_update_closid(u32 closid)
 	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
 
 	state->next_closid = closid;
-	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->closid);
+}
+
+void __pqr_ctx_switch(void);
+
+inline void __intel_cmt_no_event_sched_in(void);
+
+static inline void intel_pqr_ctx_switch(void)
+{
+	if (static_branch_unlikely(&pqr_common_enable_key))
+		__pqr_ctx_switch();
+}
+
+#else
+
+static inline void intel_pqr_ctx_switch(void)
+{
 }
 
 #endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 984a7bf..967ea93 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -22,6 +22,7 @@ struct vm86;
 #include <asm/nops.h>
 #include <asm/special_insns.h>
 #include <asm/fpu/types.h>
+#include <asm/intel_rdt_common.h>
 
 #include <linux/personality.h>
 #include <linux/cache.h>
@@ -856,4 +857,7 @@ bool xen_set_default_idle(void);
 
 void stop_this_cpu(void *dummy);
 void df_debug(struct pt_regs *regs, long error_code);
+
+#define finish_arch_pre_lock_switch intel_pqr_ctx_switch
+
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt_common.c b/arch/x86/kernel/cpu/intel_rdt_common.c
index 7fd5b20..6e45287 100644
--- a/arch/x86/kernel/cpu/intel_rdt_common.c
+++ b/arch/x86/kernel/cpu/intel_rdt_common.c
@@ -6,3 +6,32 @@
  * must ensure interruptions are handled properly.
  */
 DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+
+DEFINE_STATIC_KEY_FALSE(pqr_common_enable_key);
+
+/*
+ * Update hw's RMID using cgroup's if perf_event did not.
+ * Sync pqr cache with MSR.
+ */
+inline void __pqr_ctx_switch(void)
+{
+	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+
+	/*
+	 * Obtain a rmid from current task's cgroup if no perf event
+	 * set a rmid.
+	 */
+	if (likely(!(state->next_rmid_flags & PQR_RMID_FLAG_EVENT)))
+		__intel_cmt_no_event_sched_in();
+
+	state->next_rmid_flags = 0;
+
+	/* __intel_cmt_no_event_sched_in might have changed next_rmid. */
+	if (likely(state->rmid == state->next_rmid &&
+		   state->closid == state->next_closid))
+		return;
+
+	state->rmid = state->next_rmid;
+	state->closid = state->next_closid;
+	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->closid);
+}
--
2.8.0.rc3.226.g39d4020