tools/virtio/ringtest/main.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>	/* assert() in the fallback cpu_relax() below */
#include <stdbool.h>
#include <stdlib.h>	/* _Exit() in the fallback wait_cycles() below */

extern int param;

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include "x86intrin.h"

static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#elif defined(__s390x__)
static inline void wait_cycles(unsigned long long cycles)
{
	asm volatile("0: brctg %0,0b" : : "d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

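/*
 * VMEXIT_CYCLES/VMENTRY_CYCLES are rough models of the cost of a guest/host
 * transition, not measured values: for example, on a hypothetical 2.5 GHz
 * CPU the x86 setting of 500 cycles corresponds to 500 / 2.5e9 s = 200 ns
 * per simulated exit or entry.  Tune them for the hardware being modelled.
 */
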
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call();
bool used_empty();
bool enable_call();
void kick_available();
/* host side */
void disable_kick();
bool avail_empty();
bool enable_kick();
bool use_buf(unsigned *, void **);
void call_used();

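/*
 * A simplified sketch of how a benchmark loop drives these hooks (not a
 * literal copy of the benchmark's main loop; buffer/token variable names
 * are illustrative):
 *
 *	// guest side: keep the ring full, reclaim completions
 *	if (add_inbuf(len, buf, token) == 0)
 *		kick_available();
 *	while ((token = get_buf(&len, &buf)))
 *		;			// completion reclaimed
 *	if (used_empty() && enable_call())
 *		wait_for_call();	// nothing pending, sleep until called
 *
 *	// host side: consume buffers, signal completions
 *	while (use_buf(&len, &buf))
 *		call_used();
 *	if (avail_empty() && enable_kick())
 *		wait_for_kick();
 *
 * enable_call()/enable_kick() re-arm the respective notification and report
 * whether the ring is still empty afterwards, so the caller knows it is
 * safe to sleep.
 */
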
/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}

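/*
 * Typical use: spin on the ring state when notifications are disabled, e.g.
 *
 *	while (used_empty())
 *		busy_wait();
 *
 * With do_relax set this inserts cpu_relax() (the PAUSE instruction on x86)
 * into the loop; otherwise the compiler barrier alone keeps the re-read of
 * the ring state from being optimised away.
 */
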
#if defined(__x86_64__) || defined(__i386__)
#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#else
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
#endif

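/*
 * A full barrier is what makes re-arming notifications race-free: the store
 * that enables the call/kick must be visible to the other side before the
 * ring is re-checked, or both sides may end up sleeping.  Sketch of the
 * pattern a ring implementation would use:
 *
 *	// in enable_call() / enable_kick()
 *	<publish the notification flag or event index>
 *	smp_mb();
 *	return used_empty();	// avail_empty() on the host side
 */
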
/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)

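/*
 * These are intended to be used in pairs across the ring: the producer
 * issues smp_release() between writing a descriptor's payload and publishing
 * its index, and the consumer issues smp_acquire() between reading the index
 * and reading the payload.
 */
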
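/*
 * On x86 and s390x stores are not reordered with earlier stores, so a
 * compiler barrier is all smp_wmb() needs there; other architectures fall
 * back to the release fence above.
 */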
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define smp_wmb() barrier()
#else
#define smp_wmb() smp_release()
#endif

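/*
 * Only Alpha can reorder a dependent load ahead of the load it depends on,
 * so smp_read_barrier_depends() has to be a real barrier there and is a
 * no-op everywhere else (this mirrors the kernel's definition).
 */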
#ifdef __alpha__
#define smp_read_barrier_depends() smp_acquire()
#else
#define smp_read_barrier_depends() do {} while (0)
#endif

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
	case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
	case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
	case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

#define READ_ONCE(x) \
({ \
	union { typeof(x) __val; char __c[1]; } __u; \
	__read_once_size(&(x), __u.__c, sizeof(x)); \
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val; \
})

#define WRITE_ONCE(x, val) \
({ \
	union { typeof(x) __val; char __c[1]; } __u = \
		{ .__val = (typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x)); \
	__u.__val; \
})

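/*
 * READ_ONCE()/WRITE_ONCE() perform a single volatile access of the object's
 * size (for 1/2/4/8-byte objects), so the compiler can neither tear the
 * access nor cache and re-fetch the value - exactly what shared ring indices
 * need.  Consumer-side sketch (structure and field names are illustrative):
 *
 *	unsigned idx = READ_ONCE(ring->avail_idx);
 *	smp_acquire();
 *	process(&ring->desc[idx & (ring_size - 1)]);
 *
 * paired with a producer that issues smp_release() before
 * WRITE_ONCE(ring->avail_idx, idx + 1).
 */
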
#endif /* MAIN_H */