Subject: [PATCH v5 08/10] perf: cache perf_event_groups_first for cgroups
Add a per-CPU cache of the pinned and flexible perf_event_groups_first
values for a cgroup, avoiding an O(log(#perf events)) search during
sched_in.
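
To make the caching idea concrete, here is a small standalone userspace
sketch of the same pattern (hypothetical toy_* names, not part of the
patch): each CPU slot remembers the event with the smallest group_index,
so the "first event" lookup is O(1), and deleting the cached event falls
back to its successor.

#include <assert.h>
#include <stddef.h>

#define NR_CPUS 4

struct toy_event {
	unsigned long group_index;	/* insertion-ordered key, as in perf */
	int cpu;
	struct toy_event *next;		/* stand-in for the rbtree successor */
};

/* one cached "first event" pointer per CPU, as the patch adds per cgroup */
static struct toy_event *first_event[NR_CPUS];

static void toy_cache_insert(struct toy_event *event)
{
	struct toy_event **slot = &first_event[event->cpu];

	/* remember the smallest group_index seen on this CPU */
	if (!*slot || event->group_index < (*slot)->group_index)
		*slot = event;
}

static void toy_cache_delete(struct toy_event *event)
{
	struct toy_event **slot = &first_event[event->cpu];

	/* if the cached event is removed, fall back to its successor */
	if (*slot == event)
		*slot = event->next;
}

int main(void)
{
	struct toy_event a = { .group_index = 1, .cpu = 0 };
	struct toy_event b = { .group_index = 2, .cpu = 0 };

	a.next = &b;
	toy_cache_insert(&a);
	toy_cache_insert(&b);
	assert(first_event[0] == &a);	/* O(1) lookup of the leftmost event */

	toy_cache_delete(&a);
	assert(first_event[0] == &b);	/* successor becomes the cached first */
	return 0;
}

In the patch proper the cache is per cgroup and per CPU (alloc_percpu()),
and deletion falls back to perf_event_groups_next() rather than a plain
next pointer.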

Based-on-work-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
---
 include/linux/perf_event.h |  7 ++++
 kernel/events/core.c       | 84 ++++++++++++++++++++++++++++++++++----
2 files changed, 82 insertions(+), 9 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index cd7d3b624655..a29a38df909e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -892,6 +892,13 @@ struct perf_cgroup_info {
struct perf_cgroup {
struct cgroup_subsys_state css;
struct perf_cgroup_info __percpu *info;
+ /*
+ * Per-CPU cache of the first event of this cgroup in the
+ * perf_cpu_context's pinned_groups and flexible_groups trees.
+ * Avoids an rbtree search during sched_in.
+ */
+ struct perf_event * __percpu *pinned_event;
+ struct perf_event * __percpu *flexible_event;
};

/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3da9cc1ebc2d..5935d2474050 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1638,6 +1638,27 @@ perf_event_groups_insert(struct perf_event_groups *groups,

rb_link_node(&event->group_node, parent, node);
rb_insert_color(&event->group_node, &groups->tree);
+#ifdef CONFIG_CGROUP_PERF
+ if (is_cgroup_event(event)) {
+ struct perf_event **cgrp_event;
+
+ if (event->attr.pinned) {
+ cgrp_event = per_cpu_ptr(event->cgrp->pinned_event,
+ event->cpu);
+ } else {
+ cgrp_event = per_cpu_ptr(event->cgrp->flexible_event,
+ event->cpu);
+ }
+ /*
+ * Remember smallest, left-most, group index event. The
+ * less-than condition is only possible if the group_index
+ * overflows.
+ */
+ if (!*cgrp_event ||
+ event->group_index < (*cgrp_event)->group_index)
+ *cgrp_event = event;
+ }
+#endif
}

/*
@@ -1652,6 +1673,9 @@ add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
perf_event_groups_insert(groups, event);
}

+static struct perf_event *
+perf_event_groups_next(struct perf_event *event);
+
/*
* Delete a group from a tree.
*/
@@ -1662,6 +1686,22 @@ perf_event_groups_delete(struct perf_event_groups *groups,
WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
RB_EMPTY_ROOT(&groups->tree));

+#ifdef CONFIG_CGROUP_PERF
+ if (is_cgroup_event(event)) {
+ struct perf_event **cgrp_event;
+
+ if (event->attr.pinned) {
+ cgrp_event = per_cpu_ptr(event->cgrp->pinned_event,
+ event->cpu);
+ } else {
+ cgrp_event = per_cpu_ptr(event->cgrp->flexible_event,
+ event->cpu);
+ }
+ if (*cgrp_event == event)
+ *cgrp_event = perf_event_groups_next(event);
+ }
+#endif
+
rb_erase(&event->group_node, &groups->tree);
init_event_group(event);
}
@@ -1679,7 +1719,8 @@ del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
}

/*
- * Get the leftmost event in the cpu/cgroup subtree.
+ * Get the leftmost event in the cpu subtree without a cgroup (ie task or
+ * system-wide).
*/
static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
@@ -3596,8 +3637,8 @@ static int ctx_groups_sched_in(struct perf_event_context *ctx,
.cap = ARRAY_SIZE(itrs),
};
/* Events not within a CPU context may be on any CPU. */
- __heap_add(&event_heap, perf_event_groups_first(groups, -1,
- NULL));
+ __heap_add(&event_heap,
+ perf_event_groups_first(groups, -1, NULL));
}
evt = event_heap.data;

@@ -3605,8 +3646,16 @@ static int ctx_groups_sched_in(struct perf_event_context *ctx,

#ifdef CONFIG_CGROUP_PERF
for (; css; css = css->parent) {
- __heap_add(&event_heap, perf_event_groups_first(groups, cpu,
- css->cgroup));
+ struct perf_cgroup *cgrp;
+
+ /* root cgroup doesn't have events */
+ if (css->id == 1)
+ break;
+
+ cgrp = container_of(css, struct perf_cgroup, css);
+ __heap_add(&event_heap, is_pinned
+ ? *per_cpu_ptr(cgrp->pinned_event, cpu)
+ : *per_cpu_ptr(cgrp->flexible_event, cpu));
}
#endif

@@ -12672,18 +12721,35 @@ perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
return ERR_PTR(-ENOMEM);

jc->info = alloc_percpu(struct perf_cgroup_info);
- if (!jc->info) {
- kfree(jc);
- return ERR_PTR(-ENOMEM);
- }
+ if (!jc->info)
+ goto free_jc;
+
+ jc->pinned_event = alloc_percpu(struct perf_event *);
+ if (!jc->pinned_event)
+ goto free_jc_info;
+
+ jc->flexible_event = alloc_percpu(struct perf_event *);
+ if (!jc->flexible_event)
+ goto free_jc_pinned;

return &jc->css;
+
+free_jc_pinned:
+ free_percpu(jc->pinned_event);
+free_jc_info:
+ free_percpu(jc->info);
+free_jc:
+ kfree(jc);
+
+ return ERR_PTR(-ENOMEM);
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

+ free_percpu(jc->pinned_event);
+ free_percpu(jc->flexible_event);
free_percpu(jc->info);
kfree(jc);
}
--
2.24.0.393.g34dc348eaf-goog