Commit | Line | Data |
---|---|---|
76369139 FW |
1 | #ifndef _KERNEL_EVENTS_INTERNAL_H |
2 | #define _KERNEL_EVENTS_INTERNAL_H | |
3 | ||
4 | #define RING_BUFFER_WRITABLE 0x01 | |
5 | ||
/*
 * Per-event ring buffer backing perf_mmap(); freed via RCU
 * (see rcu_head) and reference-counted across mappings.
 */
struct ring_buffer {
	atomic_t		refcount;	/* active references; buffer freed on last put — TODO confirm against rb_alloc/rb_free callers */
	struct rcu_head		rcu_head;	/* for RCU-deferred freeing */
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct	work;		/* deferred-free work (vmalloc backing) — NOTE(review): confirm it is the free path */
	int			page_order;	/* allocation order */
#endif
	int			nr_pages;	/* nr of data pages */
	int			writable;	/* are we writable */

	atomic_t		poll;		/* POLL_ for wakeups */

	local_t			head;		/* write position */
	local_t			nest;		/* nested writers */
	local_t			events;		/* event limit */
	local_t			wakeup;		/* wakeup stamp */
	local_t			lost;		/* nr records lost */

	long			watermark;	/* wakeup watermark */
	/* poll crap */
	spinlock_t		event_lock;	/* protects event_list */
	struct list_head	event_list;	/* events mapped to this buffer */

	struct perf_event_mmap_page	*user_page;	/* control page shared with userspace */
	void				*data_pages[0];	/* trailing array of data-page pointers (flexible-array idiom) */
};
32 | ||
/* Allocate/free the ring buffer itself. */
extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
/* Wake up waiters polling on @event. */
extern void perf_event_wakeup(struct perf_event *event);

/*
 * Fill in the ID-related sample fields of @header/@data for @event,
 * and emit those fields through an open output handle, respectively.
 */
extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

/* Translate an mmap page offset into the backing struct page. */
extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
49 | ||
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

/* Allocation order of the data area (stored in the rb itself). */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

/* Page-backed buffers always use order-0 allocations. */
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
69 | ||
70 | static unsigned long perf_data_size(struct ring_buffer *rb) | |
71 | { | |
72 | return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); | |
73 | } | |
74 | ||
/*
 * Copy @len bytes from @buf into the ring buffer at the output
 * handle's current position, splitting the copy across data-page
 * boundaries as needed.
 *
 * Invariants relied on (from the code below):
 *  - handle->size is the space left in the current data page;
 *  - rb->nr_pages is a power of two (the '& (nr_pages - 1)' wrap
 *    only works then — assumed enforced at allocation, TODO confirm
 *    in rb_alloc());
 *  - the caller has already reserved @len bytes, so wrapping onto
 *    the next page never overwrites unconsumed data — NOTE(review):
 *    confirm against perf_output_begin().
 */
static inline void
__output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	do {
		/* Copy no more than what is left in the current page. */
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			/* Current page exhausted: advance (with wrap) to the next one. */
			struct ring_buffer *rb = handle->rb;

			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}
98 | ||
99 | #endif /* _KERNEL_EVENTS_INTERNAL_H */ |