From:	David Carrillo-Cisneros <>
Subject:	[PATCH v3 10/46] perf/x86/intel/cmt: add Package MONitored Resource (pmonr) initialization
Date:	Sat, 29 Oct 2016 17:38:07 -0700
A pmonr is the per-package component of a monr. This patch only adds the initialization and destruction of pmonrs; later patches in the series add their functionality and explain their usage.
CPU hotplug is supported by initializing/terminating all pmonrs in the monr hierarchy when the first/last CPU in a package goes online/offline; a sketch of the hierarchy walk this relies on follows the diffstat.
Signed-off-by: David Carrillo-Cisneros <davidcc@google.com>
---
 arch/x86/events/intel/cmt.c | 161 +++++++++++++++++++++++++++++++++++++++++++-
 arch/x86/events/intel/cmt.h |  20 +++++-
 2 files changed, 177 insertions(+), 4 deletions(-)
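
A note for reviewers: the monr hierarchy walks added below
(monr_next_descendant_pre/post) are iterative and stack-less, using only the
parent/children links, much like the kernel's cgroup descendant iterators. A
minimal userspace sketch of the pre-order variant, with the list_head plumbing
simplified to plain pointers (all names in the sketch are illustrative, not
taken from the patch):

	#include <stdio.h>

	struct node {
		struct node *parent;
		struct node *first_child;
		struct node *next_sibling;
		const char *name;
	};

	/* NULL pos means "first child of parent", as in monr_next_child(). */
	static struct node *next_child(struct node *pos, struct node *parent)
	{
		return pos ? pos->next_sibling : parent->first_child;
	}

	static struct node *next_descendant_pre(struct node *pos,
						struct node *root)
	{
		struct node *next;

		if (!pos)
			return root;
		/* descend if possible... */
		next = next_child(NULL, pos);
		if (next)
			return next;
		/* ...else advance to a sibling, climbing up as needed. */
		while (pos != root) {
			next = next_child(pos, pos->parent);
			if (next)
				return next;
			pos = pos->parent;
		}
		return NULL;
	}

	int main(void)
	{
		struct node root = { .name = "root" };
		struct node a = { .parent = &root, .name = "a" };
		struct node b = { .parent = &root, .name = "b" };
		struct node a1 = { .parent = &a, .name = "a1" };
		struct node *pos = NULL;

		root.first_child = &a;
		a.next_sibling = &b;
		a.first_child = &a1;

		/* prints: root a a1 b */
		while ((pos = next_descendant_pre(pos, &root)))
			printf("%s\n", pos->name);
		return 0;
	}

Because the iterator keeps no state besides the current position,
init_pkg_data() below can allocate one pmonr per monr while holding only
cmt_mutex, and __terminate_pkg_data() can clear the per-package slots in post
order, so each node it visits is a leaf with respect to the not-yet-visited
ones.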
diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 39f4bfa..06e6325 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -103,13 +103,49 @@ static void monr_hrchy_release_locks(unsigned long *flags)
 	raw_local_irq_restore(*flags);
 }
 
+static inline struct pmonr *pkgd_pmonr(struct pkg_data *pkgd, struct monr *monr)
+{
+#ifdef CONFIG_LOCKDEP
+	bool safe = lockdep_is_held(&cmt_mutex) ||
+		    lockdep_is_held(&pkgd->lock) ||
+		    rcu_read_lock_held();
+#endif
+
+	return rcu_dereference_check(monr->pmonrs[pkgd->pkgid], safe);
+}
+
+static struct pmonr *pmonr_alloc(struct pkg_data *pkgd)
+{
+	struct pmonr *pmonr;
+	int cpu_node = cpu_to_node(pkgd->work_cpu);
+
+	pmonr = kzalloc_node(sizeof(*pmonr), GFP_KERNEL, cpu_node);
+	if (!pmonr)
+		return ERR_PTR(-ENOMEM);
+
+	pmonr->pkgd = pkgd;
+
+	return pmonr;
+}
+
 static void monr_dealloc(struct monr *monr)
 {
+	u16 p, nr_pkgs = topology_max_packages();
+
+	for (p = 0; p < nr_pkgs; p++) {
+		/* out of monr_hrchy, so no need for rcu or lock protection. */
+		if (!monr->pmonrs[p])
+			continue;
+		kfree(monr->pmonrs[p]);
+	}
 	kfree(monr);
 }
 
+/* Alloc monr with all pmonrs in Off state. */
 static struct monr *monr_alloc(void)
 {
+	struct pkg_data *pkgd = NULL;
+	struct pmonr *pmonr;
 	struct monr *monr;
 
 	lockdep_assert_held(&cmt_mutex);
@@ -122,6 +158,28 @@ static struct monr *monr_alloc(void)
 	INIT_LIST_HEAD(&monr->children);
 	INIT_LIST_HEAD(&monr->parent_entry);
 
+	monr->pmonrs = kcalloc(topology_max_packages(),
+			       sizeof(pmonr), GFP_KERNEL);
+	if (!monr->pmonrs) {
+		monr_dealloc(monr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * Do not create pmonrs for uninitialized packages.
+	 * Protected from initialization of new pkgs by cmt_mutex.
+	 */
+	while ((pkgd = cmt_pkgs_data_next_rcu(pkgd))) {
+		pmonr = pmonr_alloc(pkgd);
+		if (IS_ERR(pmonr)) {
+			monr_dealloc(monr);
+			return ERR_CAST(pmonr);
+		}
+		pmonr->monr = monr;
+		/* safe to assign since pmonr is not in monr_hrchy. */
+		RCU_INIT_POINTER(monr->pmonrs[pkgd->pkgid], pmonr);
+	}
+
 	return monr;
 }
 
@@ -318,6 +376,69 @@ static int mon_group_setup_event(struct perf_event *event)
 	return monr_hrchy_attach_event(event);
 }
 
+static struct monr *monr_next_child(struct monr *pos, struct monr *parent)
+{
+	if (!pos)
+		return list_first_entry_or_null(
+			&parent->children, struct monr, parent_entry);
+	if (list_is_last(&pos->parent_entry, &parent->children))
+		return NULL;
+
+	return list_next_entry(pos, parent_entry);
+}
+
+static struct monr *monr_next_descendant_pre(struct monr *pos,
+					     struct monr *root)
+{
+	struct monr *next;
+
+	if (!pos)
+		return root;
+
+	next = monr_next_child(NULL, pos);
+	if (next)
+		return next;
+
+	while (pos != root) {
+		next = monr_next_child(pos, pos->parent);
+		if (next)
+			return next;
+		pos = pos->parent;
+	}
+
+	return NULL;
+}
+
+static struct monr *monr_leftmost_descendant(struct monr *pos)
+{
+	struct monr *last;
+
+	do {
+		last = pos;
+		pos = monr_next_child(NULL, pos);
+	} while (pos);
+
+	return last;
+}
+
+static struct monr *monr_next_descendant_post(struct monr *pos,
+					      struct monr *root)
+{
+	struct monr *next;
+
+	if (!pos)
+		return monr_leftmost_descendant(root);
+
+	if (pos == root)
+		return NULL;
+
+	next = monr_next_child(pos, pos->parent);
+	if (next)
+		return monr_leftmost_descendant(next);
+
+	return pos->parent;
+}
+
 static void intel_cmt_event_read(struct perf_event *event)
 {
 }
@@ -482,14 +603,29 @@ static struct pkg_data *alloc_pkg_data(int cpu)
 
 static void __terminate_pkg_data(struct pkg_data *pkgd)
 {
+	struct monr *pos = NULL;
+	unsigned long flags;
+
 	lockdep_assert_held(&cmt_mutex);
 
+	raw_spin_lock_irqsave(&pkgd->lock, flags);
+	/* post-order traversal guarantees pos to be a leaf of the monr hierarchy. */
+	while ((pos = monr_next_descendant_post(pos, monr_hrchy_root)))
+		RCU_INIT_POINTER(pos->pmonrs[pkgd->pkgid], NULL);
+
+	raw_spin_unlock_irqrestore(&pkgd->lock, flags);
+
+	synchronize_rcu();
+
 	free_pkg_data(pkgd);
 }
 
 static int init_pkg_data(int cpu)
 {
+	struct monr *pos = NULL;
 	struct pkg_data *pkgd;
+	struct pmonr *pmonr;
+	int err = 0;
 	u16 pkgid = topology_logical_package_id(cpu);
 
 	lockdep_assert_held(&cmt_mutex);
@@ -502,10 +638,28 @@ static int init_pkg_data(int cpu)
 	if (IS_ERR(pkgd))
 		return PTR_ERR(pkgd);
 
-	rcu_assign_pointer(cmt_pkgs_data[pkgid], pkgd);
-	synchronize_rcu();
+	while ((pos = monr_next_descendant_pre(pos, monr_hrchy_root))) {
+		pmonr = pmonr_alloc(pkgd);
+		if (IS_ERR(pmonr)) {
+			err = PTR_ERR(pmonr);
+			break;
+		}
+		pmonr->monr = pos;
+		/*
+		 * No need to protect pmonrs since this pkgd is
+		 * not set in cmt_pkgs_data yet.
+		 */
+		RCU_INIT_POINTER(pos->pmonrs[pkgid], pmonr);
+	}
 
-	return 0;
+	if (err) {
+		__terminate_pkg_data(pkgd);
+	} else {
+		rcu_assign_pointer(cmt_pkgs_data[pkgid], pkgd);
+		synchronize_rcu();
+	}
+
+	return err;
 }
 
 static int intel_cmt_hp_online_enter(unsigned int cpu)
@@ -604,6 +758,7 @@ static int __init cmt_alloc(void)
 	if (!cmt_pkgs_data)
 		return -ENOMEM;
 
+	/* won't alloc any pmonr since no cmt_pkgs_data is initialized yet. */
 	mutex_lock(&cmt_mutex);
 	monr_hrchy_root = monr_alloc();
 	mutex_unlock(&cmt_mutex);
diff --git a/arch/x86/events/intel/cmt.h b/arch/x86/events/intel/cmt.h
index 46e8335..7f3a7b8 100644
--- a/arch/x86/events/intel/cmt.h
+++ b/arch/x86/events/intel/cmt.h
@@ -27,6 +27,9 @@
 * and to make possible to capture dependencies between threads in the same
 * cgroup or process.
 *
+ * Each monr has a package monr (pmonr) for each package with at least one
+ * online cpu. The pmonr handles the CMT and MBM monitoring within its package.
+ *
 *
 * Locking
 *
@@ -38,8 +41,19 @@
 *  cgroup start/stop.
 *  - Hold pkg->mutex and pkg->lock in _all_ active packages to traverse or
 *  change the monr hierarchy.
- *  - pkgd->lock: Hold in current package to access that pkgd's members.
+ *  - pkgd->lock: Hold in current package to access that pkgd's members. Hold
+ *  a pmonr's package pkgd->lock for non-atomic access to that pmonr.
+ */
+
+/**
+ * struct pmonr - per-package component of a MONitored Resource (monr).
+ * @monr:		The monr that contains this pmonr.
+ * @pkgd:		The package data associated with this pmonr.
 */
+struct pmonr {
+	struct monr		*monr;
+	struct pkg_data		*pkgd;
+};
 
 /**
 * struct pkg_data - Per-package CMT data.
@@ -65,6 +79,7 @@ struct pkg_data {
 * struct monr - MONitored Resource.
 * @mon_events:	The head of event's group that use this monr, if any.
 * @entry:		List entry into cmt_event_monrs.
+ * @pmonrs:		Per-package pmonrs.
 * @parent:		Parent in monr hierarchy.
 * @children:		List of children in monr hierarchy.
 * @parent_entry:	Entry in parent's children list.
@@ -72,10 +87,13 @@ struct pkg_data {
 *
 * An monr is assigned to every CMT event and/or monitored cgroups when
 * monitoring is activated and that instance's address do not change during
 * the lifetime of the event or cgroup.
+ *
+ * On initialization, all of a monr's pmonrs start in Off state.
 */
 struct monr {
 	struct perf_event	*mon_events;
 	struct list_head	entry;
+	struct pmonr		**pmonrs;
 	struct monr		*parent;
 	struct list_head	children;
-- 
2.8.0.rc3.226.g39d4020