virtio_vdpa: Revert "virtio_vdpa: support the arg sizes of find_vqs()"
[linux-block.git] / include / linux / virtio_config.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
	u64 addr;
	u64 len;
};

/**
 * virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements all of the operations
 *       getting/setting a value as a simple read/write! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	callbacks: array of callbacks, for each virtqueue
 *		include a NULL entry for vqs that do not need a callback
 *	names: array of virtqueue names (mainly for debugging)
 *		include a NULL entry for vqs unused by driver
 *	sizes: array of virtqueue sizes
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *	The function guarantees that all memory operations on the
 *	queue before it are visible to the vring_interrupt() that is
 *	called after it.
 *	vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	the dev->feature bits if it wants.
 *	Note: despite the name this can be called any number of times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *	This returns a pointer to the bus name a la pci_name from which
 *	the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 * @disable_vq_and_reset: reset a queue individually (optional).
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	disable_vq_and_reset will guarantee that the callbacks are disabled and
 *	synchronized.
 *	Except for the callback, the caller should guarantee that the vring is
 *	not accessed by any functions of virtqueue.
 * @enable_vq_after_reset: enable a reset queue
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
 *	set.
 */
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[],
			u32 sizes[],
			const bool *ctx,
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	void (*synchronize_cbs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
						 int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
	int (*disable_vq_and_reset)(struct virtqueue *vq);
	int (*enable_vq_after_reset)(struct virtqueue *vq);
};
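
/*
 * Illustrative sketch (not part of this header): a transport fills in a
 * struct virtio_config_ops and drivers reach it only through vdev->config,
 * normally via the helpers below.  The "example_*" callbacks are
 * hypothetical names, not an existing transport:
 *
 *	static const struct virtio_config_ops example_config_ops = {
 *		.get			= example_get,
 *		.set			= example_set,
 *		.get_status		= example_get_status,
 *		.set_status		= example_set_status,
 *		.reset			= example_reset,
 *		.find_vqs		= example_find_vqs,
 *		.del_vqs		= example_del_vqs,
 *		.get_features		= example_get_features,
 *		.finalize_features	= example_finalize_features,
 *		.bus_name		= example_bus_name,
 *	};
 */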

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features &= ~BIT_ULL(fbit);
}

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}
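
/*
 * Usage sketch (illustrative only, not part of this header): a driver gates
 * optional behaviour on a negotiated feature bit, e.g. in the spirit of
 * virtio-net's multiqueue support; "max_queue_pairs" is a hypothetical
 * local variable:
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ))
 *		max_queue_pairs = virtio_cread16(vdev,
 *				offsetof(struct virtio_net_config,
 *					 max_virtqueue_pairs));
 *	else
 *		max_queue_pairs = 1;
 */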

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features), this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	vq_callback_t *callbacks[] = { c };
	const char *names[] = { n };
	struct virtqueue *vq;
	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
					 NULL, NULL);
	if (err < 0)
		return ERR_PTR(err);
	return vq;
}
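
/*
 * Usage sketch (illustrative only): a single-queue driver typically grabs
 * its virtqueue in probe(); "example_recv_done" and the queue name are
 * hypothetical:
 *
 *	struct virtqueue *vq;
 *
 *	vq = virtio_find_single_vq(vdev, example_recv_done, "requests");
 *	if (IS_ERR(vq))
 *		return PTR_ERR(vq);
 */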

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		    struct virtqueue *vqs[], vq_callback_t *callbacks[],
		    const char * const names[],
		    struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL,
				      NULL, desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL,
				      ctx, desc);
}
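
/*
 * Usage sketch (illustrative only): a two-queue device; the callback
 * functions and queue names are hypothetical:
 *
 *	struct virtqueue *vqs[2];
 *	vq_callback_t *callbacks[] = { example_ctrl_done, example_data_done };
 *	static const char * const names[] = { "control", "data" };
 *	int err;
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *	if (err)
 *		return err;
 */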

/**
 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
 * @dev: the device
 */
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
{
	if (dev->config->synchronize_cbs) {
		dev->config->synchronize_cbs(dev);
	} else {
		/*
		 * A best effort fallback to synchronize with
		 * interrupts, preemption and softirq disabled
		 * regions. See comment above synchronize_rcu().
		 */
		synchronize_rcu();
	}
}

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
	 * will see the driver specific setup if it sees vq->broken
	 * as false (even if the notifications come before DRIVER_OK).
	 */
	virtio_synchronize_cbs(dev);
	__virtio_unbreak_device(dev);
#endif
	/*
	 * The transport should ensure the visibility of vq->broken
	 * before setting DRIVER_OK. See the comments for the transport
	 * specific set_status() method.
	 *
	 * A well behaved device will only notify a virtqueue after
	 * DRIVER_OK, this means the device should "see" the coherent
	 * memory write that set vq->broken as false which is done by
	 * the driver when it sees DRIVER_OK, then the following
	 * driver's vring_interrupt() will see vq->broken as false so
	 * we won't lose any notification.
	 */
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
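
/*
 * Usage sketch (illustrative only): the usual probe() ordering is to set up
 * virtqueues first and only then mark the device ready, so no queue is
 * kicked before DRIVER_OK:
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *	if (err)
 *		return err;
 *	...
 *	virtio_device_ready(vdev);
 *	virtqueue_kick(vqs[0]);
 */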

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be set
 * due to config support, irq type and sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
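
/*
 * Usage sketch (illustrative only): data shared with the device uses the
 * __virtioNN types and is converted at the point of access.  The struct,
 * its fields and EXAMPLE_T_IN are hypothetical:
 *
 *	struct example_req_hdr {
 *		__virtio32 type;
 *		__virtio64 sector;
 *	};
 *
 *	hdr->type = cpu_to_virtio32(vdev, EXAMPLE_T_IN);
 *	hdr->sector = cpu_to_virtio64(vdev, sector);
 */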

#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)

#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)

#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member), \
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
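
/*
 * Usage sketch (illustrative only): struct example_config and its
 * __virtio64 "capacity" member are hypothetical; the pointer must have the
 * native type (u64 here) or the typecheck above fails at build time:
 *
 *	struct example_config {
 *		__virtio64 capacity;
 *	};
 *
 *	u64 cap;
 *
 *	virtio_cread(vdev, struct example_config, capacity, &cap);
 *	...
 *	virtio_cwrite(vdev, struct example_config, capacity, &cap);
 */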

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		__le16: (u16)le16_to_cpu(x), \
		__le32: (u32)le32_to_cpu(x), \
		__le64: (u64)le64_to_cpu(x) \
		)

#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		__u8: (x), \
		__le16: cpu_to_le16(x), \
		__le32: cpu_to_le32(x), \
		__le64: cpu_to_le64(x) \
		)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member), \
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
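
/*
 * Usage sketch (illustrative only): modern-only (VIRTIO_F_VERSION_1)
 * devices declare config fields with __le types and use the _le accessors;
 * the struct and member are hypothetical:
 *
 *	struct example_modern_config {
 *		__le32 queue_limit;
 *	};
 *
 *	u32 limit;
 *
 *	virtio_cread_le(vdev, struct example_modern_config,
 *			queue_limit, &limit);
 */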


/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})
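
/*
 * Usage sketch (illustrative only): read a config field only when the
 * matching feature was negotiated and fall back to a default otherwise;
 * the feature bit, struct and member are hypothetical:
 *
 *	if (virtio_cread_feature(vdev, EXAMPLE_F_SEG_MAX,
 *				 struct example_config, seg_max,
 *				 &seg_max) < 0)
 *		seg_max = 1;
 */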

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})

#endif /* _LINUX_VIRTIO_CONFIG_H */