perf hists: Return error from hists__collapse_resort()
authorNamhyung Kim <namhyung@kernel.org>
Tue, 16 Feb 2016 14:08:25 +0000 (23:08 +0900)
committerArnaldo Carvalho de Melo <acme@redhat.com>
Fri, 19 Feb 2016 22:16:06 +0000 (19:16 -0300)
Currently hists__collapse_resort() and hists__collapse_insert_entry()
don't return an error code. Now that callchain_merge() can report
errors, make these functions abort on failure and propagate the error
to the caller.  A later patch will add more work that can also fail.
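
For example, a caller could then check the new return value and stop
processing on failure.  A minimal sketch, assuming a hypothetical
per-evsel helper (the function name and error message below are
illustrative only and not part of this patch):

    static int collapse_evsel_hists(struct perf_evsel *evsel,
                                    struct ui_progress *prog)
    {
            struct hists *hists = evsel__hists(evsel);

            /* -1 means callchain_merge() failed while collapsing */
            if (hists__collapse_resort(hists, prog) < 0) {
                    pr_err("failed to collapse hist entries\n");
                    return -1;
            }
            return 0;
    }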

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1455631723-17345-8-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/hist.c
tools/perf/util/hist.h

index a856617be74475182043e4bfad9d540abc1a167a..827c6cbcd05de9fd87e160d1f1002854cd6366b2 100644 (file)
@@ -1046,8 +1046,8 @@ int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
  * collapse the histogram
  */
 
-bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
-                                 struct rb_root *root, struct hist_entry *he)
+int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
+                                struct hist_entry *he)
 {
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
@@ -1061,18 +1061,21 @@ bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                cmp = hist_entry__collapse(iter, he);
 
                if (!cmp) {
+                       int ret = 0;
+
                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);
 
                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
-                               callchain_merge(&callchain_cursor,
-                                               iter->callchain,
-                                               he->callchain);
+                               if (callchain_merge(&callchain_cursor,
+                                                   iter->callchain,
+                                                   he->callchain) < 0)
+                                       ret = -1;
                        }
                        hist_entry__delete(he);
-                       return false;
+                       return ret;
                }
 
                if (cmp < 0)
@@ -1084,7 +1087,7 @@ bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
 
        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
-       return true;
+       return 1;
 }
 
 struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
@@ -1110,14 +1113,15 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
        hists__filter_entry_by_socket(hists, he);
 }
 
-void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
+int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
 {
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
+       int ret;
 
        if (!sort__need_collapse)
-               return;
+               return 0;
 
        hists->nr_entries = 0;
 
@@ -1132,7 +1136,11 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
                next = rb_next(&n->rb_node_in);
 
                rb_erase(&n->rb_node_in, root);
-               if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
+               ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
+               if (ret < 0)
+                       return -1;
+
+               if (ret) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
@@ -1143,6 +1151,7 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
                if (prog)
                        ui_progress__update(prog, 1);
        }
+       return 0;
 }
 
 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
index 045a9e785a34a89221400f3f3cae8f6e38b8c0ee..97baa1d6ae5ffba915bd9dddb65e7fa91ab75784 100644 (file)
@@ -138,7 +138,7 @@ void hist_entry__delete(struct hist_entry *he);
 
 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog);
 void hists__output_resort(struct hists *hists, struct ui_progress *prog);
-void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
+int hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
 
 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
 void hists__delete_entries(struct hists *hists);
@@ -197,7 +197,7 @@ int hists__init(void);
 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
 
 struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
-bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
+int hists__collapse_insert_entry(struct hists *hists,
                                  struct rb_root *root, struct hist_entry *he);
 
 struct perf_hpp {