/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
        u64 addr;
        u64 len;
};

typedef void vq_callback_t(struct virtqueue *);

/**
 * struct virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements the operations that
 *       get/set a value as simple reads/writes! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @get: read the value of a configuration field
 *      vdev: the virtio_device
 *      offset: the offset of the configuration field
 *      buf: the buffer to write the field value into.
 *      len: the length of the buffer
 * @set: write the value of a configuration field
 *      vdev: the virtio_device
 *      offset: the offset of the configuration field
 *      buf: the buffer to read the field value from.
 *      len: the length of the buffer
 * @generation: config generation counter (optional)
 *      vdev: the virtio_device
 *      Returns the config generation counter
 * @get_status: read the status byte
 *      vdev: the virtio_device
 *      Returns the status byte
 * @set_status: write the status byte
 *      vdev: the virtio_device
 *      status: the new status byte
 * @reset: reset the device
 *      vdev: the virtio device
 *      After this, status and feature negotiation must be done again.
 *      Device must not be reset from its vq/config callbacks, or in
 *      parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *      vdev: the virtio_device
 *      nvqs: the number of virtqueues to find
 *      vqs: on success, includes new virtqueues
 *      callbacks: array of callbacks, for each virtqueue
 *              include a NULL entry for vqs that do not need a callback
 *      names: array of virtqueue names (mainly for debugging)
 *              include a NULL entry for vqs unused by driver
 *      Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *      The function guarantees that all memory operations on the
 *      queue before it are visible to the vring_interrupt() that is
 *      called after it.
 *      vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *      vdev: the virtio_device
 *      Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *      vdev: the virtio_device
 *      This sends the driver feature bits to the device: it can change
 *      the dev->feature bits if it wants.
 *      Note that despite the name this can be called any number of
 *      times.
 *      Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *      vdev: the virtio_device
 *      This returns a pointer to the bus name a la pci_name from which
 *      the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 * @disable_vq_and_reset: reset a queue individually (optional).
 *      vq: the virtqueue
 *      Returns 0 on success or error status
 *      disable_vq_and_reset will guarantee that the callbacks are disabled
 *      and synchronized.
 *      Except for the callback, the caller must guarantee that the vring is
 *      not accessed by any virtqueue function.
 * @enable_vq_after_reset: enable a previously reset queue
 *      vq: the virtqueue
 *      Returns 0 on success or error status
 *      If disable_vq_and_reset is set, then enable_vq_after_reset must also
 *      be set.
 */
struct virtio_config_ops {
        void (*get)(struct virtio_device *vdev, unsigned offset,
                    void *buf, unsigned len);
        void (*set)(struct virtio_device *vdev, unsigned offset,
                    const void *buf, unsigned len);
        u32 (*generation)(struct virtio_device *vdev);
        u8 (*get_status)(struct virtio_device *vdev);
        void (*set_status)(struct virtio_device *vdev, u8 status);
        void (*reset)(struct virtio_device *vdev);
        int (*find_vqs)(struct virtio_device *, unsigned nvqs,
                        struct virtqueue *vqs[], vq_callback_t *callbacks[],
                        const char * const names[], const bool *ctx,
                        struct irq_affinity *desc);
        void (*del_vqs)(struct virtio_device *);
        void (*synchronize_cbs)(struct virtio_device *);
        u64 (*get_features)(struct virtio_device *vdev);
        int (*finalize_features)(struct virtio_device *vdev);
        const char *(*bus_name)(struct virtio_device *vdev);
        int (*set_vq_affinity)(struct virtqueue *vq,
                               const struct cpumask *cpu_mask);
        const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
                                                 int index);
        bool (*get_shm_region)(struct virtio_device *vdev,
                               struct virtio_shm_region *region, u8 id);
        int (*disable_vq_and_reset)(struct virtqueue *vq);
        int (*enable_vq_after_reset)(struct virtqueue *vq);
};
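
/*
 * Illustrative note (editorial sketch, not upstream text): transports such
 * as virtio_pci or virtio_mmio fill in a const struct virtio_config_ops and
 * point vdev->config at it before register_virtio_device(); drivers normally
 * reach these ops only through the wrappers declared below, e.g.
 *
 *      if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
 *              val = virtio_cread32(vdev, 0);
 *
 * rather than calling vdev->config->get() directly.
 */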

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
                                         unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Drivers should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
                                     unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);

        return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
                                    unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);

        vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
                                      unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);

        vdev->features &= ~BIT_ULL(fbit);
}
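
/*
 * Illustrative sketch (hypothetical driver, not upstream code): a driver's
 * ->validate() callback may patch the feature bits with these helpers before
 * they are finalized, for example:
 *
 *      static int foo_validate(struct virtio_device *vdev)
 *      {
 *              if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1))
 *                      __virtio_clear_bit(vdev, VIRTIO_FOO_F_FANCY);
 *              return 0;
 *      }
 *
 * VIRTIO_FOO_F_FANCY is a made-up feature bit used only for illustration.
 */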

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
                                      unsigned int fbit)
{
        if (fbit < VIRTIO_TRANSPORT_F_START)
                virtio_check_driver_offered_feature(vdev, fbit);

        return __virtio_test_bit(vdev, fbit);
}
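
/*
 * Illustrative usage (sketch): checking a negotiated feature before acting
 * on it, with virtio_blk shown only as a familiar example of the pattern:
 *
 *      if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
 *              set_disk_ro(disk, true);
 */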

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
        /*
         * Note the reverse polarity of the quirk feature (compared to most
         * other features), this is for compatibility with legacy systems.
         */
        return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
                                        vq_callback_t *c, const char *n)
{
        vq_callback_t *callbacks[] = { c };
        const char *names[] = { n };
        struct virtqueue *vq;
        int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
                                         NULL);
        if (err < 0)
                return ERR_PTR(err);
        return vq;
}
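
/*
 * Illustrative usage (sketch, hypothetical names): a single-queue driver's
 * probe might do
 *
 *      vq = virtio_find_single_vq(vdev, foo_done, "requests");
 *      if (IS_ERR(vq))
 *              return PTR_ERR(vq);
 *
 * where foo_done() is the driver's vq_callback_t completion handler and
 * "requests" is just an example queue name.
 */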

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                    struct virtqueue *vqs[], vq_callback_t *callbacks[],
                    const char * const names[],
                    struct irq_affinity *desc)
{
        return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
                        struct virtqueue *vqs[], vq_callback_t *callbacks[],
                        const char * const names[], const bool *ctx,
                        struct irq_affinity *desc)
{
        return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
                                      desc);
}
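
/*
 * Illustrative usage (sketch, hypothetical driver): requesting two queues,
 * only the second of which needs a callback:
 *
 *      struct virtqueue *vqs[2];
 *      vq_callback_t *callbacks[] = { NULL, foo_intr };
 *      static const char * const names[] = { "control", "data" };
 *      int err;
 *
 *      err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *      if (err)
 *              return err;
 */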

/**
 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
 * @dev: the virtio device
 */
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
{
        if (dev->config->synchronize_cbs) {
                dev->config->synchronize_cbs(dev);
        } else {
                /*
                 * A best effort fallback to synchronize with
                 * interrupts, preemption and softirq disabled
                 * regions. See comment above synchronize_rcu().
                 */
                synchronize_rcu();
        }
}

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the virtio device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
        unsigned status = dev->config->get_status(dev);

        WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
        /*
         * The virtio_synchronize_cbs() makes sure vring_interrupt()
         * will see the driver specific setup if it sees vq->broken
         * as false (even if the notifications come before DRIVER_OK).
         */
        virtio_synchronize_cbs(dev);
        __virtio_unbreak_device(dev);
#endif
        /*
         * The transport should ensure the visibility of vq->broken
         * before setting DRIVER_OK. See the comments for the transport
         * specific set_status() method.
         *
         * A well behaved device will only notify a virtqueue after
         * DRIVER_OK, so the device should "see" the coherent memory write
         * that set vq->broken to false, which is done by the driver when
         * it sets DRIVER_OK. The driver's subsequent vring_interrupt()
         * will then see vq->broken as false, so we won't lose any
         * notification.
         */
        dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
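
/*
 * Illustrative probe ordering (sketch, hypothetical driver): a driver that
 * wants to kick a queue from within probe must mark the device ready first:
 *
 *      ... set up vqs with virtio_find_vqs() ...
 *      virtio_device_ready(vdev);
 *      virtqueue_add_outbuf(vq, sg, 1, token, GFP_KERNEL);
 *      virtqueue_kick(vq);
 *
 * sg and token are placeholders for the driver's own buffer bookkeeping.
 */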

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
        if (!vdev->config->bus_name)
                return "virtio";
        return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be
 * set, depending on config support, irq type and sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
        struct virtio_device *vdev = vq->vdev;
        if (vdev->config->set_vq_affinity)
                return vdev->config->set_vq_affinity(vq, cpu_mask);
        return 0;
}
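
/*
 * Illustrative usage (sketch): spreading per-queue interrupt work across
 * CPUs, e.g. hinting that queue i should be handled on CPU i:
 *
 *      virtqueue_set_affinity(vqs[i], cpumask_of(i));
 *
 * The return value can reasonably be ignored since the hint is best-effort.
 */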

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
                           struct virtio_shm_region *region, u8 id)
{
        if (!vdev->config->get_shm_region)
                return false;
        return vdev->config->get_shm_region(vdev, region, id);
}
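
/*
 * Illustrative usage (sketch, hypothetical region id): looking up a shared
 * memory region exported by the device:
 *
 *      struct virtio_shm_region shm;
 *
 *      if (!virtio_get_shm_region(vdev, &shm, FOO_SHMCAP_ID_CACHE))
 *              return -ENXIO;
 *      map_base = shm.addr;
 *      map_len = shm.len;
 *
 * FOO_SHMCAP_ID_CACHE is a made-up id; real ids are defined per device type
 * (virtiofs and virtio-gpu, for example, define their own).
 */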

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
        return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
                virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
        return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
        return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
        return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
        return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
        return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
        return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}

#define virtio_to_cpu(vdev, x) \
        _Generic((x), \
                __u8: (x), \
                __virtio16: virtio16_to_cpu((vdev), (x)), \
                __virtio32: virtio32_to_cpu((vdev), (x)), \
                __virtio64: virtio64_to_cpu((vdev), (x)) \
                )

#define cpu_to_virtio(vdev, x, m) \
        _Generic((m), \
                __u8: (x), \
                __virtio16: cpu_to_virtio16((vdev), (x)), \
                __virtio32: cpu_to_virtio32((vdev), (x)), \
                __virtio64: cpu_to_virtio64((vdev), (x)) \
                )

#define __virtio_native_type(structname, member) \
        typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cread_v; \
                \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
                \
                switch (sizeof(virtio_cread_v)) { \
                case 1: \
                case 2: \
                case 4: \
                        vdev->config->get((vdev), \
                                          offsetof(structname, member), \
                                          &virtio_cread_v, \
                                          sizeof(virtio_cread_v)); \
                        break; \
                default: \
                        __virtio_cread_many((vdev), \
                                            offsetof(structname, member), \
                                            &virtio_cread_v, \
                                            1, \
                                            sizeof(virtio_cread_v)); \
                        break; \
                } \
                *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \
        } while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cwrite_v = \
                        cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
                \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
                \
                vdev->config->set((vdev), offsetof(structname, member), \
                                  &virtio_cwrite_v, \
                                  sizeof(virtio_cwrite_v)); \
        } while(0)
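
/*
 * Illustrative usage (sketch): reading a whole config field in native
 * endianness, with virtio_blk shown only as a familiar example:
 *
 *      u64 capacity;
 *
 *      virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 *
 * The macro converts from the device's endianness and, for fields wider than
 * 4 bytes, re-reads under the config generation counter.
 */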

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
        _Generic((x), \
                __u8: (u8)(x), \
                __le16: (u16)le16_to_cpu(x), \
                __le32: (u32)le32_to_cpu(x), \
                __le64: (u64)le64_to_cpu(x) \
                )

#define virtio_cpu_to_le(x, m) \
        _Generic((m), \
                __u8: (x), \
                __le16: cpu_to_le16(x), \
                __le32: cpu_to_le32(x), \
                __le64: cpu_to_le64(x) \
                )

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cread_v; \
                \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
                \
                switch (sizeof(virtio_cread_v)) { \
                case 1: \
                case 2: \
                case 4: \
                        vdev->config->get((vdev), \
                                          offsetof(structname, member), \
                                          &virtio_cread_v, \
                                          sizeof(virtio_cread_v)); \
                        break; \
                default: \
                        __virtio_cread_many((vdev), \
                                            offsetof(structname, member), \
                                            &virtio_cread_v, \
                                            1, \
                                            sizeof(virtio_cread_v)); \
                        break; \
                } \
                *(ptr) = virtio_le_to_cpu(virtio_cread_v); \
        } while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cwrite_v = \
                        virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
                \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
                \
                vdev->config->set((vdev), offsetof(structname, member), \
                                  &virtio_cwrite_v, \
                                  sizeof(virtio_cwrite_v)); \
        } while(0)
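
/*
 * Illustrative usage (sketch, hypothetical config struct): modern-only
 * devices declare their config fields as __le* and use the _le accessors:
 *
 *      struct virtio_foo_config { __le32 num_queues; };
 *      u32 num_queues;
 *
 *      virtio_cread_le(vdev, struct virtio_foo_config, num_queues,
 *                      &num_queues);
 */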

/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
                                       unsigned int offset,
                                       void *buf, size_t count, size_t bytes)
{
        u32 old, gen = vdev->config->generation ?
                vdev->config->generation(vdev) : 0;
        int i;

        might_sleep();
        do {
                old = gen;

                for (i = 0; i < count; i++)
                        vdev->config->get(vdev, offset + bytes * i,
                                          buf + i * bytes, bytes);

                gen = vdev->config->generation ?
                        vdev->config->generation(vdev) : 0;
        } while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
                                      unsigned int offset,
                                      void *buf, size_t len)
{
        __virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
        u8 ret;

        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
        return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
                                  unsigned int offset, u8 val)
{
        might_sleep();
        vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
                                 unsigned int offset)
{
        __virtio16 ret;

        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
        return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
                                   unsigned int offset, u16 val)
{
        __virtio16 v;

        might_sleep();
        v = cpu_to_virtio16(vdev, val);
        vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
                                 unsigned int offset)
{
        __virtio32 ret;

        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
        return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
                                   unsigned int offset, u32 val)
{
        __virtio32 v;

        might_sleep();
        v = cpu_to_virtio32(vdev, val);
        vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
                                 unsigned int offset)
{
        __virtio64 ret;

        __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
        return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
                                   unsigned int offset, u64 val)
{
        __virtio64 v;

        might_sleep();
        v = cpu_to_virtio64(vdev, val);
        vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr) \
        ({ \
                int _r = 0; \
                if (!virtio_has_feature(vdev, fbit)) \
                        _r = -ENOENT; \
                else \
                        virtio_cread((vdev), structname, member, ptr); \
                _r; \
        })
75a0a52b 613
035ce421
MT
614/* Conditional config space accessors. */
615#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \
616 ({ \
617 int _r = 0; \
618 if (!virtio_has_feature(vdev, fbit)) \
619 _r = -ENOENT; \
620 else \
621 virtio_cread_le((vdev), structname, member, ptr); \
622 _r; \
623 })
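
/*
 * Illustrative usage (sketch): reading a field that is only valid when the
 * corresponding feature was negotiated, with virtio_net shown only as a
 * familiar example; ETH_DATA_LEN is just an example fallback:
 *
 *      u16 mtu;
 *      int err = virtio_cread_feature(vdev, VIRTIO_NET_F_MTU,
 *                                     struct virtio_net_config, mtu, &mtu);
 *      if (err == -ENOENT)
 *              mtu = ETH_DATA_LEN;
 */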

#endif /* _LINUX_VIRTIO_CONFIG_H */