include/linux/virtio_ring.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers. Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (eg. other heterogeneous
 * CPUs) we do need real barriers. In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();
	else
		mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		dma_rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		dma_wmb();
}

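/*
 * Usage sketch (illustrative only, not part of the upstream header;
 * variable names are hypothetical and endianness conversions such as
 * cpu_to_virtio16() are omitted): a producer pairs virtio_wmb() with the
 * index update that publishes new ring entries, and a consumer pairs the
 * read of the index written by the other side with virtio_rmb() before
 * reading the entry it covers:
 *
 *	vring->avail->ring[avail_idx % vring->num] = head;
 *	virtio_wmb(weak_barriers);		// entry visible before index
 *	vring->avail->idx = next_idx;
 *
 *	if (READ_ONCE(vring->used->idx) != last_used_idx) {
 *		virtio_rmb(weak_barriers);	// index read before entry read
 *		// ... consume vring->used->ring[last_used_idx % vring->num] ...
 *	}
 */
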
#define virtio_store_mb(weak_barriers, p, v) \
do { \
	if (weak_barriers) { \
		virt_store_mb(*p, v); \
	} else { \
		WRITE_ONCE(*p, v); \
		mb(); \
	} \
} while (0)

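/*
 * Usage sketch (illustrative only, hypothetical variable names):
 * virtio_store_mb() is for a store that must be ordered before a later
 * load, e.g. publishing an event-suppression index and only then
 * re-reading how far the other side has advanced; see
 * virtqueue_enable_cb_delayed() in drivers/virtio/virtio_ring.c for an
 * in-tree user of this pattern:
 *
 *	virtio_store_mb(weak_barriers, &vring_used_event(vring),
 *			cpu_to_virtio16(vdev, last_used_idx + bufs));
 *	// only after the full barrier is it safe to re-read vring->used->idx
 */
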
struct virtio_device;
struct virtqueue;
struct device;

/*
 * Creates a virtqueue and allocates the descriptor ring. If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected. The caller should query virtqueue_get_vring_size to learn
 * the actual size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);

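/*
 * Usage sketch (illustrative only, hypothetical names): a transport
 * typically passes the maximum queue size reported by the device, lets
 * the core shrink the ring when a large allocation fails
 * (may_reduce_num), and then asks virtqueue_get_vring_size(), declared in
 * <linux/virtio.h>, for the size that was actually allocated:
 *
 *	vq = vring_create_virtqueue(index, max_num, vring_align, vdev,
 *				    true,	// weak_barriers
 *				    true,	// may_reduce_num
 *				    false,	// ctx
 *				    notify, callback, name);
 *	if (vq)
 *		num = virtqueue_get_vring_size(vq);	// may be < max_num
 */
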
/*
 * Creates a virtqueue and allocates the descriptor ring with per
 * virtqueue DMA device.
 */
struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
					     unsigned int num,
					     unsigned int vring_align,
					     struct virtio_device *vdev,
					     bool weak_barriers,
					     bool may_reduce_num,
					     bool ctx,
					     bool (*notify)(struct virtqueue *vq),
					     void (*callback)(struct virtqueue *vq),
					     const char *name,
					     struct device *dma_dev);

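/*
 * Usage sketch (illustrative only, hypothetical names): same calling
 * convention as vring_create_virtqueue(), except that the caller chooses
 * the struct device the ring memory is allocated and DMA-mapped for:
 *
 *	vq = vring_create_virtqueue_dma(index, max_num, vring_align, vdev,
 *					true, true, false,
 *					notify, callback, name,
 *					ring_dma_dev);	// device doing the DMA
 */
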
/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);

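/*
 * Usage sketch (illustrative only, hypothetical names): with
 * vring_new_virtqueue() the caller allocates the ring itself, e.g. sized
 * with vring_size() from <uapi/linux/virtio_ring.h>, and stays
 * responsible for freeing that memory after vring_del_virtqueue():
 *
 *	queue = alloc_pages_exact(vring_size(num, vring_align),
 *				  GFP_KERNEL | __GFP_ZERO);
 *	if (queue)
 *		vq = vring_new_virtqueue(index, num, vring_align, vdev,
 *					 true,	// weak_barriers
 *					 false,	// ctx
 *					 queue, notify, callback, name);
 */
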
/*
 * Destroys a virtqueue. If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

irqreturn_t vring_interrupt(int irq, void *_vq);

u32 vring_notification_data(struct virtqueue *_vq);
#endif /* _LINUX_VIRTIO_RING_H */