Merge tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-block.git] / tools / virtio / ringtest / ptr_ring.c
CommitLineData
9fb6bc5b
MT
1#define _GNU_SOURCE
2#include "main.h"
3#include <stdlib.h>
4#include <stdio.h>
5#include <string.h>
6#include <pthread.h>
7#include <malloc.h>
8#include <assert.h>
9#include <errno.h>
10#include <limits.h>
11
12#define SMP_CACHE_BYTES 64
13#define cache_line_size() SMP_CACHE_BYTES
14#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
15#define unlikely(x) (__builtin_expect(!!(x), 0))
16#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
17typedef pthread_spinlock_t spinlock_t;
18
19typedef int gfp_t;
59e6ae53
MT
20static void *kmalloc(unsigned size, gfp_t gfp)
21{
22 return memalign(64, size);
23}
24
9fb6bc5b
MT
25static void *kzalloc(unsigned size, gfp_t gfp)
26{
27 void *p = memalign(64, size);
28 if (!p)
29 return p;
30 memset(p, 0, size);
31
32 return p;
33}
34
/*
 * kfree() shim for the harness.  free(NULL) is defined as a no-op by
 * the C standard (C11 7.22.3.3), so the former "if (p)" guard was
 * redundant and has been dropped.
 */
static void kfree(void *p)
{
	free(p);
}
40
41static void spin_lock_init(spinlock_t *lock)
42{
43 int r = pthread_spin_init(lock, 0);
44 assert(!r);
45}
46
47static void spin_lock(spinlock_t *lock)
48{
49 int ret = pthread_spin_lock(lock);
50 assert(!ret);
51}
52
53static void spin_unlock(spinlock_t *lock)
54{
55 int ret = pthread_spin_unlock(lock);
56 assert(!ret);
57}
58
59static void spin_lock_bh(spinlock_t *lock)
60{
61 spin_lock(lock);
62}
63
64static void spin_unlock_bh(spinlock_t *lock)
65{
66 spin_unlock(lock);
67}
68
69static void spin_lock_irq(spinlock_t *lock)
70{
71 spin_lock(lock);
72}
73
74static void spin_unlock_irq(spinlock_t *lock)
75{
76 spin_unlock(lock);
77}
78
79static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
80{
81 spin_lock(lock);
82}
83
84static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
85{
86 spin_unlock(lock);
87}
88
89#include "../../../include/linux/ptr_ring.h"
90
91static unsigned long long headcnt, tailcnt;
92static struct ptr_ring array ____cacheline_aligned_in_smp;
93
94/* implemented by ring */
95void alloc_ring(void)
96{
97 int ret = ptr_ring_init(&array, ring_size, 0);
98 assert(!ret);
99}
100
101/* guest side */
102int add_inbuf(unsigned len, void *buf, void *datap)
103{
104 int ret;
105
106 ret = __ptr_ring_produce(&array, buf);
107 if (ret >= 0) {
108 ret = 0;
109 headcnt++;
110 }
111
112 return ret;
113}
114
115/*
116 * ptr_ring API provides no way for producer to find out whether a given
117 * buffer was consumed. Our tests merely require that a successful get_buf
118 * implies that add_inbuf succeed in the past, and that add_inbuf will succeed,
119 * fake it accordingly.
120 */
121void *get_buf(unsigned *lenp, void **bufp)
122{
123 void *datap;
124
125 if (tailcnt == headcnt || __ptr_ring_full(&array))
126 datap = NULL;
127 else {
128 datap = "Buffer\n";
129 ++tailcnt;
130 }
131
132 return datap;
133}
134
135void poll_used(void)
136{
137 void *b;
138
139 do {
140 if (tailcnt == headcnt || __ptr_ring_full(&array)) {
141 b = NULL;
142 barrier();
143 } else {
144 b = "Buffer\n";
145 }
146 } while (!b);
147}
148
/* Call (guest-notification) management is unused by this backend. */
void disable_call()
{
	assert(0);
}
153
/*
 * Call (guest-notification) management is unused by this backend;
 * reaching here indicates a harness bug.  The return statement is
 * unreachable with assertions enabled but prevents falling off the end
 * of a non-void function (undefined behavior if the value were used,
 * e.g. under NDEBUG).
 */
bool enable_call()
{
	assert(0);
	return false;
}
158
/* Host kicks are unused by this backend; reaching here is a bug. */
void kick_available(void)
{
	assert(0);
}
163
164/* host side */
/* Host side: kick management is unused by this backend. */
void disable_kick()
{
	assert(0);
}
169
/*
 * Host side: kick management is unused by this backend; reaching here
 * indicates a harness bug.  The return statement is unreachable with
 * assertions enabled but prevents falling off the end of a non-void
 * function (undefined behavior if the value were used under NDEBUG).
 */
bool enable_kick()
{
	assert(0);
	return false;
}
174
175void poll_avail(void)
176{
177 void *b;
178
179 do {
180 barrier();
181 b = __ptr_ring_peek(&array);
182 } while (!b);
183}
184
185bool use_buf(unsigned *lenp, void **bufp)
186{
187 void *ptr;
188
189 ptr = __ptr_ring_consume(&array);
190
191 return ptr;
192}
193
/* Host-to-guest call notification is unused by this backend. */
void call_used(void)
{
	assert(0);
}