rbtree, perf: Use new rbtree helpers
Reduce rbtree boilerplate by using the new helpers.

One noteworthy change is unification of the various (partial) compare
functions. We construct a subtree match by forcing the sub-order to
always match, see __group_cmp().

Due to 'const' we had to touch cgroup_id().
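
For illustration, a minimal sketch of the helper pattern this commit switches
to, assuming a kernel build context where <linux/rbtree.h> provides rb_add(),
rb_find_first() and rb_next_match(); the names item, item_less, item_cmp and
example are hypothetical and are not the perf code itself. Insertion uses the
full order (primary key, then a unique sub-key), while lookup uses a partial
compare that treats the sub-key as always equal, mirroring the subtree-match
trick in __group_cmp():

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct item {
		struct rb_node node;
		int key;	/* primary order, e.g. a CPU number */
		u64 seq;	/* unique sub-key, e.g. an insertion index */
	};

	#define __node_2_item(n) rb_entry((n), struct item, node)

	/* Full order for insertion: key first, seq breaks ties. */
	static inline bool item_less(struct rb_node *a, const struct rb_node *b)
	{
		const struct item *ia = __node_2_item(a);
		const struct item *ib = __node_2_item(b);

		if (ia->key != ib->key)
			return ia->key < ib->key;
		return ia->seq < ib->seq;
	}

	/* Partial order for lookup: compare the key only, ignore seq. */
	static inline int item_cmp(const void *key, const struct rb_node *n)
	{
		int k = *(const int *)key;
		const struct item *i = __node_2_item(n);

		if (k < i->key)
			return -1;
		if (k > i->key)
			return 1;
		return 0;	/* sub-order always "matches": any node with this key */
	}

	/* Insert @it, then count how many items share @key, visited in seq order. */
	static int example(struct rb_root *tree, struct item *it, int key)
	{
		struct rb_node *n;
		int count = 0;

		rb_add(&it->node, tree, item_less);

		for (n = rb_find_first(&key, tree, item_cmp); n;
		     n = rb_next_match(&key, n, item_cmp))
			count++;

		return count;
	}

The same split appears in the diff below: __group_less() feeds rb_add(), while
__group_cmp() feeds rb_find_first() and rb_next_match().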

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Peter Zijlstra authored and Ingo Molnar committed Feb 17, 2021
1 parent 8ecca39 commit a3b8986
Showing 2 changed files with 92 additions and 107 deletions.
4 changes: 2 additions & 2 deletions include/linux/cgroup.h
@@ -307,7 +307,7 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/

static inline u64 cgroup_id(struct cgroup *cgrp)
static inline u64 cgroup_id(const struct cgroup *cgrp)
{
return cgrp->kn->id;
}
@@ -701,7 +701,7 @@ void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
195 changes: 90 additions & 105 deletions kernel/events/core.c
@@ -1595,50 +1595,91 @@ static void perf_event_groups_init(struct perf_event_groups *groups)
groups->index = 0;
}

static inline struct cgroup *event_cgroup(const struct perf_event *event)
{
struct cgroup *cgroup = NULL;

#ifdef CONFIG_CGROUP_PERF
if (event->cgrp)
cgroup = event->cgrp->css.cgroup;
#endif

return cgroup;
}

/*
* Compare function for event groups;
*
* Implements complex key that first sorts by CPU and then by virtual index
* which provides ordering when rotating groups for the same CPU.
*/
static bool
perf_event_groups_less(struct perf_event *left, struct perf_event *right)
static __always_inline int
perf_event_groups_cmp(const int left_cpu, const struct cgroup *left_cgroup,
const u64 left_group_index, const struct perf_event *right)
{
if (left->cpu < right->cpu)
return true;
if (left->cpu > right->cpu)
return false;
if (left_cpu < right->cpu)
return -1;
if (left_cpu > right->cpu)
return 1;

#ifdef CONFIG_CGROUP_PERF
if (left->cgrp != right->cgrp) {
if (!left->cgrp || !left->cgrp->css.cgroup) {
/*
* Left has no cgroup but right does, no cgroups come
* first.
*/
return true;
}
if (!right->cgrp || !right->cgrp->css.cgroup) {
/*
* Right has no cgroup but left does, no cgroups come
* first.
*/
return false;
}
/* Two dissimilar cgroups, order by id. */
if (left->cgrp->css.cgroup->kn->id < right->cgrp->css.cgroup->kn->id)
return true;
{
const struct cgroup *right_cgroup = event_cgroup(right);

return false;
if (left_cgroup != right_cgroup) {
if (!left_cgroup) {
/*
* Left has no cgroup but right does, no
* cgroups come first.
*/
return -1;
}
if (!right_cgroup) {
/*
* Right has no cgroup but left does, no
* cgroups come first.
*/
return 1;
}
/* Two dissimilar cgroups, order by id. */
if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
return -1;

return 1;
}
}
#endif

if (left->group_index < right->group_index)
return true;
if (left->group_index > right->group_index)
return false;
if (left_group_index < right->group_index)
return -1;
if (left_group_index > right->group_index)
return 1;

return false;
return 0;
}

#define __node_2_pe(node) \
rb_entry((node), struct perf_event, group_node)

static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
{
struct perf_event *e = __node_2_pe(a);
return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index,
__node_2_pe(b)) < 0;
}

struct __group_key {
int cpu;
struct cgroup *cgroup;
};

static inline int __group_cmp(const void *key, const struct rb_node *node)
{
const struct __group_key *a = key;
const struct perf_event *b = __node_2_pe(node);

/* partial/subtree match: @cpu, @cgroup; ignore: @group_index */
return perf_event_groups_cmp(a->cpu, a->cgroup, b->group_index, b);
}

/*
@@ -1650,27 +1691,9 @@ static void
perf_event_groups_insert(struct perf_event_groups *groups,
struct perf_event *event)
{
struct perf_event *node_event;
struct rb_node *parent;
struct rb_node **node;

event->group_index = ++groups->index;

node = &groups->tree.rb_node;
parent = *node;

while (*node) {
parent = *node;
node_event = container_of(*node, struct perf_event, group_node);

if (perf_event_groups_less(event, node_event))
node = &parent->rb_left;
else
node = &parent->rb_right;
}

rb_link_node(&event->group_node, parent, node);
rb_insert_color(&event->group_node, &groups->tree);
rb_add(&event->group_node, &groups->tree, __group_less);
}

/*
@@ -1718,45 +1741,17 @@ static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
struct cgroup *cgrp)
{
struct perf_event *node_event = NULL, *match = NULL;
struct rb_node *node = groups->tree.rb_node;
#ifdef CONFIG_CGROUP_PERF
u64 node_cgrp_id, cgrp_id = 0;

if (cgrp)
cgrp_id = cgrp->kn->id;
#endif

while (node) {
node_event = container_of(node, struct perf_event, group_node);

if (cpu < node_event->cpu) {
node = node->rb_left;
continue;
}
if (cpu > node_event->cpu) {
node = node->rb_right;
continue;
}
#ifdef CONFIG_CGROUP_PERF
node_cgrp_id = 0;
if (node_event->cgrp && node_event->cgrp->css.cgroup)
node_cgrp_id = node_event->cgrp->css.cgroup->kn->id;
struct __group_key key = {
.cpu = cpu,
.cgroup = cgrp,
};
struct rb_node *node;

if (cgrp_id < node_cgrp_id) {
node = node->rb_left;
continue;
}
if (cgrp_id > node_cgrp_id) {
node = node->rb_right;
continue;
}
#endif
match = node_event;
node = node->rb_left;
}
node = rb_find_first(&key, &groups->tree, __group_cmp);
if (node)
return __node_2_pe(node);

return match;
return NULL;
}

/*
@@ -1765,27 +1760,17 @@ perf_event_groups_first(struct perf_event_groups *groups, int cpu,
static struct perf_event *
perf_event_groups_next(struct perf_event *event)
{
struct perf_event *next;
#ifdef CONFIG_CGROUP_PERF
u64 curr_cgrp_id = 0;
u64 next_cgrp_id = 0;
#endif

next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
if (next == NULL || next->cpu != event->cpu)
return NULL;

#ifdef CONFIG_CGROUP_PERF
if (event->cgrp && event->cgrp->css.cgroup)
curr_cgrp_id = event->cgrp->css.cgroup->kn->id;
struct __group_key key = {
.cpu = event->cpu,
.cgroup = event_cgroup(event),
};
struct rb_node *next;

if (next->cgrp && next->cgrp->css.cgroup)
next_cgrp_id = next->cgrp->css.cgroup->kn->id;
next = rb_next_match(&key, &event->group_node, __group_cmp);
if (next)
return __node_2_pe(next);

if (curr_cgrp_id != next_cgrp_id)
return NULL;
#endif
return next;
return NULL;
}

/*
