/* tools/virtio/ringtest/ptr_ring.c */
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

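/*
 * Minimal userspace stand-ins for the kernel facilities that
 * include/linux/ptr_ring.h depends on: cache-line alignment,
 * likely/unlikely hints, kmalloc/kzalloc/kfree and the spinlock API
 * (mapped onto pthread spinlocks), so the real ring code included
 * below can be built and exercised outside the kernel.
 */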
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;
static void *kmalloc(unsigned size, gfp_t gfp)
{
        return memalign(64, size);
}

static void *kzalloc(unsigned size, gfp_t gfp)
{
        void *p = memalign(64, size);
        if (!p)
                return p;
        memset(p, 0, size);

        return p;
}

static void kfree(void *p)
{
        if (p)
                free(p);
}

static void spin_lock_init(spinlock_t *lock)
{
        int r = pthread_spin_init(lock, 0);
        assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
        int ret = pthread_spin_lock(lock);
        assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
        int ret = pthread_spin_unlock(lock);
        assert(!ret);
}

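/*
 * In userspace there is nothing to distinguish the _bh/_irq/_irqsave
 * variants, so they all fall back to the plain spinlock operations.
 */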
static void spin_lock_bh(spinlock_t *lock)
{
        spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
        spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
        spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
        spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
        spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
        spin_unlock(lock);
}

#include "../../../include/linux/ptr_ring.h"

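/*
 * headcnt counts buffers the producer has pushed into the ring;
 * tailcnt counts buffers the producer has pretended were consumed
 * (see the comment above get_buf() below).
 */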
static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
        int ret = ptr_ring_init(&array, ring_size, 0);
        assert(!ret);
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
        int ret;

        ret = __ptr_ring_produce(&array, buf);
        if (ret >= 0) {
                ret = 0;
                headcnt++;
        }

        return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that add_inbuf
 * will succeed again, so fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
        void *datap;

        if (tailcnt == headcnt || __ptr_ring_full(&array))
                datap = NULL;
        else {
                datap = "Buffer\n";
                ++tailcnt;
        }

        return datap;
}

bool used_empty()
{
        return (tailcnt == headcnt || __ptr_ring_full(&array));
}

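/*
 * Notification callbacks: the ptr_ring backend does not implement
 * notifications, so none of these should ever be called; abort loudly
 * if one is.
 */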
void disable_call()
{
        assert(0);
}

bool enable_call()
{
        assert(0);
}

void kick_available(void)
{
        assert(0);
}

/* host side */
void disable_kick()
{
        assert(0);
}

bool enable_kick()
{
        assert(0);
}

bool avail_empty()
{
        return !__ptr_ring_peek(&array);
}

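/*
 * use_buf() pops the next buffer from the ring; a NULL result (ring
 * empty) is reported to the caller as false.
 */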
bool use_buf(unsigned *lenp, void **bufp)
{
        void *ptr;

        ptr = __ptr_ring_consume(&array);

        return ptr;
}

void call_used(void)
{
        assert(0);
}