perf util: Use cached rbtree for rblists
At the cost of an extra pointer, we can avoid the O(logN) cost of finding the first element in the tree (smallest node), which is something required for any of the strlist or intlist traversals (XXX_for_each_entry()). There are a number of users in perf of these (particularly strlists), including probes, and buildid.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/20181206191819.30182-5-dave@stgolabs.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 55ecd6310f
commit ca2270292e
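For readers unfamiliar with the cached rbtree variant this patch switches to: struct rb_root_cached carries an extra rb_leftmost pointer next to the plain rb_root, so the smallest node can be returned without walking down the tree, while insert/erase keep that pointer up to date. The sketch below is only an illustration of that API shape and is not part of the commit (my_tree, first_node and insert_node are made-up names):

/* Illustrative sketch only, not taken from this commit. */
#include <linux/rbtree.h>

static struct rb_root_cached my_tree = RB_ROOT_CACHED;

/*
 * rb_first() has to descend to the leftmost node, costing O(log N).
 * rb_first_cached() just returns the rb_leftmost pointer stored in
 * struct rb_root_cached, costing O(1).
 */
static struct rb_node *first_node(void)
{
        return rb_first_cached(&my_tree);
}

/*
 * The price of the cache is that insertions (and erasures, via
 * rb_erase_cached()) must keep rb_leftmost valid, which is why the
 * rblist code in the hunks below tracks a 'leftmost' flag while
 * walking to the insertion point.
 */
static void insert_node(struct rb_node *node, struct rb_node *parent,
                        struct rb_node **link, bool leftmost)
{
        rb_link_node(node, parent, link);
        rb_insert_color_cached(node, &my_tree, leftmost);
}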
@@ -45,7 +45,7 @@ static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
 /* For intlist iteration */
 static inline struct int_node *intlist__first(struct intlist *ilist)
 {
-        struct rb_node *rn = rb_first(&ilist->rblist.entries);
+        struct rb_node *rn = rb_first_cached(&ilist->rblist.entries);
         return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
 }
 static inline struct int_node *intlist__next(struct int_node *in)
@@ -352,7 +352,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
         else if (metrics && !raw)
                 printf("\nMetrics:\n\n");
 
-        for (node = rb_first(&groups.entries); node; node = next) {
+        for (node = rb_first_cached(&groups.entries); node; node = next) {
                 struct mep *me = container_of(node, struct mep, nd);
 
                 if (metricgroups)
@@ -140,7 +140,7 @@ struct __name##_sorted *__name = __name##_sorted__new
 
 /* For 'struct intlist' */
 #define DECLARE_RESORT_RB_INTLIST(__name, __ilist) \
-        DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries, \
+        DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \
                                   __ilist->rblist.nr_entries)
 
 /* For 'struct machine->threads' */
@@ -13,8 +13,9 @@
 
 int rblist__add_node(struct rblist *rblist, const void *new_entry)
 {
-        struct rb_node **p = &rblist->entries.rb_node;
+        struct rb_node **p = &rblist->entries.rb_root.rb_node;
         struct rb_node *parent = NULL, *new_node;
+        bool leftmost = true;
 
         while (*p != NULL) {
                 int rc;
@@ -24,8 +25,10 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
                 rc = rblist->node_cmp(parent, new_entry);
                 if (rc > 0)
                         p = &(*p)->rb_left;
-                else if (rc < 0)
+                else if (rc < 0) {
                         p = &(*p)->rb_right;
+                        leftmost = false;
+                }
                 else
                         return -EEXIST;
         }
@@ -35,7 +38,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
                 return -ENOMEM;
 
         rb_link_node(new_node, parent, p);
-        rb_insert_color(new_node, &rblist->entries);
+        rb_insert_color_cached(new_node, &rblist->entries, leftmost);
         ++rblist->nr_entries;
 
         return 0;
@@ -43,7 +46,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
 
 void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
 {
-        rb_erase(rb_node, &rblist->entries);
+        rb_erase_cached(rb_node, &rblist->entries);
         --rblist->nr_entries;
         rblist->node_delete(rblist, rb_node);
 }
@@ -52,8 +55,9 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
                                          const void *entry,
                                          bool create)
 {
-        struct rb_node **p = &rblist->entries.rb_node;
+        struct rb_node **p = &rblist->entries.rb_root.rb_node;
         struct rb_node *parent = NULL, *new_node = NULL;
+        bool leftmost = true;
 
         while (*p != NULL) {
                 int rc;
@@ -63,8 +67,10 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
                 rc = rblist->node_cmp(parent, entry);
                 if (rc > 0)
                         p = &(*p)->rb_left;
-                else if (rc < 0)
+                else if (rc < 0) {
                         p = &(*p)->rb_right;
+                        leftmost = false;
+                }
                 else
                         return parent;
         }
@@ -73,7 +79,8 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
                 new_node = rblist->node_new(rblist, entry);
                 if (new_node) {
                         rb_link_node(new_node, parent, p);
-                        rb_insert_color(new_node, &rblist->entries);
+                        rb_insert_color_cached(new_node,
+                                               &rblist->entries, leftmost);
                         ++rblist->nr_entries;
                 }
         }
@@ -94,7 +101,7 @@ struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry)
 void rblist__init(struct rblist *rblist)
 {
         if (rblist != NULL) {
-                rblist->entries = RB_ROOT;
+                rblist->entries = RB_ROOT_CACHED;
                 rblist->nr_entries = 0;
         }
 
@@ -103,7 +110,7 @@ void rblist__init(struct rblist *rblist)
 
 void rblist__exit(struct rblist *rblist)
 {
-        struct rb_node *pos, *next = rb_first(&rblist->entries);
+        struct rb_node *pos, *next = rb_first_cached(&rblist->entries);
 
         while (next) {
                 pos = next;
@@ -124,7 +131,8 @@ struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
 {
         struct rb_node *node;
 
-        for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+        for (node = rb_first_cached(&rblist->entries); node;
+             node = rb_next(node)) {
                 if (!idx--)
                         return node;
         }
@@ -20,7 +20,7 @@
  */
 
 struct rblist {
-        struct rb_root entries;
+        struct rb_root_cached entries;
         unsigned int nr_entries;
 
         int (*node_cmp)(struct rb_node *rbn, const void *entry);
@@ -168,7 +168,7 @@ static void reset_stat(struct runtime_stat *st)
         struct rb_node *pos, *next;
 
         rblist = &st->value_list;
-        next = rb_first(&rblist->entries);
+        next = rb_first_cached(&rblist->entries);
         while (next) {
                 pos = next;
                 next = rb_next(pos);
@@ -57,7 +57,7 @@ static inline unsigned int strlist__nr_entries(const struct strlist *slist)
 /* For strlist iteration */
 static inline struct str_node *strlist__first(struct strlist *slist)
 {
-        struct rb_node *rn = rb_first(&slist->rblist.entries);
+        struct rb_node *rn = rb_first_cached(&slist->rblist.entries);
         return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
 }
 static inline struct str_node *strlist__next(struct str_node *sn)