perf tools: Support reading from backward ring buffer
author Wang Nan <wangnan0@huawei.com>
Mon, 9 May 2016 01:47:50 +0000 (01:47 +0000)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Mon, 9 May 2016 20:20:53 +0000 (17:20 -0300)
perf_evlist__mmap_read_backward() is introduced for reading a backward
ring buffer. Since such a ring buffer is read in the direction opposite
to the one the kernel writes it in, and since users need to fetch the
most recent records from it, perf_evlist__mmap_read_catchup() is also
introduced to move the read pointer to the end of the buffer.
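
A typical consumer first catches up to the most recent record and then
walks the buffer until the read returns NULL. A minimal usage sketch
(assuming the evlist has already been mmap'ed with backward maps; the
process_event() handler is hypothetical):

  union perf_event *event;
  int i;

  for (i = 0; i < evlist->nr_mmaps; i++) {
          perf_evlist__mmap_read_catchup(evlist, i);
          while ((event = perf_evlist__mmap_read_backward(evlist, i)) != NULL)
                  process_event(event); /* hypothetical record handler */
  }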

Signed-off-by: Wang Nan <wangnan0@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1462758471-89706-2-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/evlist.c
tools/perf/util/evlist.h

index 17cd01421e7f85f583f62d6421d50c8a127950f4..c4bfe11479a0e0d7559ff941c63e96557400bce4 100644 (file)
@@ -766,6 +766,56 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
        return perf_mmap__read(md, evlist->overwrite, old, head, &md->prev);
 }
 
+union perf_event *
+perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
+{
+       struct perf_mmap *md = &evlist->mmap[idx];
+       u64 head, end;
+       u64 start = md->prev;
+
+       /*
+        * Check if event was unmapped due to a POLLHUP/POLLERR.
+        */
+       if (!atomic_read(&md->refcnt))
+               return NULL;
+
+       head = perf_mmap__read_head(md);
+       if (!head)
+               return NULL;
+
+       /*
+        * The 'head' pointer starts at 0 and the kernel subtracts
+        * sizeof(record) from it each time it writes a record, so in
+        * fact 'head' is negative. The 'end' pointer is built manually
+        * by adding the size of the ring buffer to 'head', which means
+        * the valid data that can be read is the whole ring buffer. If
+        * 'end' is positive, the ring buffer has not been fully filled,
+        * so 'end' must be adjusted to 0.
+        *
+        * However, since both 'head' and 'end' are unsigned, we can't
+        * simply compare 'end' against 0. Instead, compare '-head'
+        * against the size of the ring buffer, where -head is the
+        * number of bytes the kernel has written to the ring buffer.
+        */
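+       /*
+        * Worked example with illustrative numbers: with a 4096-byte
+        * ring buffer (md->mask + 1 == 4096), after the kernel has
+        * written 24 bytes, head == (u64)-24 and -head == 24 < 4096,
+        * so 'end' is set to 0 and reading covers only the 24 bytes
+        * written so far. Once 4096 or more bytes have been written,
+        * 'end' == head + 4096 and reading covers exactly one
+        * buffer's worth of the newest records.
+        */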
+       if (-head < (u64)(md->mask + 1))
+               end = 0;
+       else
+               end = head + md->mask + 1;
+
+       return perf_mmap__read(md, false, start, end, &md->prev);
+}
+
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+{
+       struct perf_mmap *md = &evlist->mmap[idx];
+       u64 head;
+
+       if (!atomic_read(&md->refcnt))
+               return;
+
+       head = perf_mmap__read_head(md);
+       md->prev = head;
+}
+
 static bool perf_mmap__empty(struct perf_mmap *md)
 {
        return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
index 208897a646cae00bd6b279382cbd953a91df0372..85d1b59802e86feafe943f4d823a694776baef15 100644 (file)
@@ -129,6 +129,10 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
 
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
 
+union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
+                                                 int idx);
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
+
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist);