/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux host-side vring helpers; for when the kernel needs to access
 * someone else's vring.
 *
 * Copyright IBM Corporation, 2013.
 * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
 *
 * Written by: Rusty Russell <rusty@rustcorp.com.au>
 */
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <asm/barrier.h>

/* virtio_ring with information needed for host access. */
struct vringh {
	/* Everything is little endian */
	bool little_endian;

	/* Guest publishes used event idx (note: we always do). */
	bool event_indices;

	/* Can we get away with weak barriers? */
	bool weak_barriers;

	/* Last available index we saw (ie. where we're up to). */
	u16 last_avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* How many descriptors we've completed since last need_notify(). */
	u32 completed;

	/* The vring (note: it may contain user pointers!) */
	struct vring vring;

	/* The function to call to notify the guest about added buffers */
	void (*notify)(struct vringh *);
};

/**
 * struct vringh_config_ops - ops for creating a host vring from a virtio driver
 * @find_vrhs: find the host vrings and instantiate them
 *	vdev: the virtio_device
 *	nhvrs: the number of host vrings to find
 *	hvrs: on success, includes new host vrings
 *	callbacks: array of driver callbacks, for each host vring
 *		include a NULL entry for vqs that do not need a callback
 *	Returns 0 on success or error status
 * @del_vrhs: free the host vrings found by find_vrhs().
 */
struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
struct vringh_config_ops {
	int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
			 struct vringh *vrhs[], vrh_callback_t *callbacks[]);
	void (*del_vrhs)(struct virtio_device *vdev);
};
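
/*
 * Illustrative sketch (not from this header): a backend driver could use
 * these ops roughly as below; the callback name, the two-vring layout,
 * and reaching the ops via vdev->vringh_config are assumptions here.
 *
 *	static void my_vrh_cb(struct virtio_device *vdev, struct vringh *vrh);
 *
 *	struct vringh *vrhs[2];
 *	vrh_callback_t *cbs[2] = { my_vrh_cb, NULL }; // NULL: no callback
 *	int err = vdev->vringh_config->find_vrhs(vdev, 2, vrhs, cbs);
 *	// ... use vrhs[0] / vrhs[1] ...
 *	vdev->vringh_config->del_vrhs(vdev);
 */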

/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
	u64 start, end_incl;
	u64 offset;
};
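
/*
 * Sketch of the getrange() callback that vringh_getdesc_user() (declared
 * below) takes: given a guest address, report the contiguous region
 * containing it, or return false to reject the descriptor. The identity
 * mapping and 1GB limit here are made-up example values.
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		if (addr >= 0x40000000ULL)
 *			return false;
 *		r->start = 0;
 *		r->end_incl = 0x40000000ULL - 1;
 *		r->offset = 0; // added to addr to get the __user address
 *		return true;
 *	}
 */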

/**
 * struct vringh_iov - iovec mangler.
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
	struct iovec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/**
 * struct vringh_kiov - kvec mangler.
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
	struct kvec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/* Flag on max_num to indicate we're kmalloced. */
#define VRINGH_IOV_ALLOCATED 0x8000000
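
/*
 * If a getdesc call needs more iovec entries than max_num, the vringh core
 * (drivers/vhost/vringh.c) kmallocs a larger array and sets this flag so
 * that the cleanup helpers below know to kfree() it.
 */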

/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc __user *desc,
		     struct vring_avail __user *avail,
		     struct vring_used __user *used);

static inline void vringh_iov_init(struct vringh_iov *iov,
				   struct iovec *iovec, unsigned num)
{
	iov->used = iov->i = 0;
	iov->consumed = 0;
	iov->max_num = num;
	iov->iov = iovec;
}

static inline void vringh_iov_reset(struct vringh_iov *iov)
{
	iov->iov[iov->i].iov_len += iov->consumed;
	iov->iov[iov->i].iov_base -= iov->consumed;
	iov->consumed = 0;
	iov->i = 0;
}

static inline void vringh_iov_cleanup(struct vringh_iov *iov)
{
	if (iov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(iov->iov);
	iov->max_num = iov->used = iov->i = iov->consumed = 0;
	iov->iov = NULL;
}

/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head);

/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len);

/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);

bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
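
/*
 * Putting the userspace helpers together (a sketch; buffer names, sizes,
 * and the getrange callback are assumptions, and error handling is
 * abbreviated). vringh_getdesc_user() returns 1 when it found a
 * descriptor, 0 when the ring is empty, or -errno.
 *
 *	struct iovec rstack[8], wstack[8];
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *
 *	vringh_iov_init(&riov, rstack, ARRAY_SIZE(rstack));
 *	vringh_iov_init(&wiov, wstack, ARRAY_SIZE(wstack));
 *	if (vringh_getdesc_user(&vrh, &riov, &wiov, my_getrange, &head) == 1) {
 *		vringh_iov_pull_user(&riov, &req, sizeof(req));
 *		// ... process request, write reply via vringh_iov_push_user() ...
 *		vringh_complete_user(&vrh, head, reply_len);
 *		if (vringh_need_notify_user(&vrh) > 0)
 *			// ... signal the guest's eventfd (driver-specific) ...
 *	}
 *	vringh_iov_cleanup(&riov);
 *	vringh_iov_cleanup(&wiov);
 */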

/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used);

static inline void vringh_kiov_init(struct vringh_kiov *kiov,
				    struct kvec *kvec, unsigned num)
{
	kiov->used = kiov->i = 0;
	kiov->consumed = 0;
	kiov->max_num = num;
	kiov->iov = kvec;
}

static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
{
	kiov->iov[kiov->i].iov_len += kiov->consumed;
	kiov->iov[kiov->i].iov_base -= kiov->consumed;
	kiov->consumed = 0;
	kiov->i = 0;
}

static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
{
	if (kiov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(kiov->iov);
	kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
	kiov->iov = NULL;
}

int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp);

ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len);
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);

int vringh_need_notify_kern(struct vringh *vrh);
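
/*
 * The kernelspace variants mirror the userspace flow (a sketch; names
 * other than the vringh_* API are assumptions). Initializing a kiov with
 * NULL/0 lets vringh_getdesc_kern() allocate the kvec array itself,
 * flagged VRINGH_IOV_ALLOCATED so cleanup frees it:
 *
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	vringh_kiov_init(&wiov, NULL, 0);
 *	while (vringh_getdesc_kern(&vrh, &riov, &wiov, &head, GFP_KERNEL) == 1) {
 *		vringh_iov_pull_kern(&riov, &req, sizeof(req));
 *		// ... handle request, push reply via vringh_iov_push_kern() ...
 *		vringh_complete_kern(&vrh, head, reply_len);
 *	}
 *	vringh_kiov_cleanup(&riov);
 *	vringh_kiov_cleanup(&wiov);
 */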

/* Notify the guest about buffers added to the used ring */
static inline void vringh_notify(struct vringh *vrh)
{
	if (vrh->notify)
		vrh->notify(vrh);
}

static inline bool vringh_is_little_endian(const struct vringh *vrh)
{
	return vrh->little_endian ||
	       virtio_legacy_is_little_endian();
}

static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
{
	return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
{
	return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
}

static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
{
	return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
{
	return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
}

static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
{
	return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
	return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}
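
/*
 * Example use of the accessors above (sketch): ring fields are stored as
 * __virtio16/32/64, so reads and writes go through these conversions.
 *
 *	u16 avail = vringh16_to_cpu(vrh, vrh->vring.avail->idx);
 *	vrh->vring.used->ring[i].len = cpu_to_vringh32(vrh, len);
 */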
#endif /* _LINUX_VRINGH_H */