Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[linux-block.git] / include / linux / vdpa.h
CommitLineData
961e9c84
JW
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_VDPA_H
3#define _LINUX_VDPA_H
4
5#include <linux/kernel.h>
6#include <linux/device.h>
7#include <linux/interrupt.h>
8#include <linux/vhost_iotlb.h>
d8ca2fa5 9#include <linux/virtio_net.h>
c2475a9a 10#include <linux/virtio_blk.h>
d8ca2fa5 11#include <linux/if_ether.h>
961e9c84
JW
12
13/**
9be5d2d4 14 * struct vdpa_callback - vDPA callback definition.
961e9c84
JW
15 * @callback: interrupt callback function
16 * @private: the data passed to the callback function
5e68470f
XY
17 * @trigger: the eventfd for the callback (Optional).
18 * When it is set, the vDPA driver must guarantee that
19 * signaling it is functional equivalent to triggering
20 * the callback. Then vDPA parent can signal it directly
21 * instead of triggering the callback.
961e9c84
JW
22 */
23struct vdpa_callback {
24 irqreturn_t (*callback)(void *data);
25 void *private;
5e68470f 26 struct eventfd_ctx *trigger;
961e9c84
JW
27};
28
c25a26e6 29/**
d0f9164e 30 * struct vdpa_notification_area - vDPA notification area
c25a26e6
JW
31 * @addr: base address of the notification area
32 * @size: size of the notification area
33 */
34struct vdpa_notification_area {
35 resource_size_t addr;
36 resource_size_t size;
37};
38
aac50c0b 39/**
530a5678 40 * struct vdpa_vq_state_split - vDPA split virtqueue state
aac50c0b
EC
41 * @avail_index: available index
42 */
530a5678 43struct vdpa_vq_state_split {
aac50c0b
EC
44 u16 avail_index;
45};
46
530a5678
JW
47/**
48 * struct vdpa_vq_state_packed - vDPA packed virtqueue state
49 * @last_avail_counter: last driver ring wrap counter observed by device
50 * @last_avail_idx: device available index
51 * @last_used_counter: device ring wrap counter
52 * @last_used_idx: used index
53 */
54struct vdpa_vq_state_packed {
86e17a51
XY
55 u16 last_avail_counter:1;
56 u16 last_avail_idx:15;
57 u16 last_used_counter:1;
58 u16 last_used_idx:15;
530a5678
JW
59};
60
61struct vdpa_vq_state {
86e17a51
XY
62 union {
63 struct vdpa_vq_state_split split;
64 struct vdpa_vq_state_packed packed;
65 };
530a5678
JW
66};
67
33b34750
PP
68struct vdpa_mgmt_dev;
69
961e9c84 70/**
d0f9164e 71 * struct vdpa_device - representation of a vDPA device
961e9c84
JW
72 * @dev: underlying device
73 * @dma_dev: the actual device that is performing DMA
240bf4e6
KK
74 * @driver_override: driver name to force a match; do not set directly,
75 * because core frees it; use driver_set_override() to
76 * set or clear it.
961e9c84 77 * @config: the configuration ops for this device.
a6a51adc 78 * @cf_lock: Protects get and set access to configuration layout.
961e9c84 79 * @index: device index
452639a6 80 * @features_valid: were features initialized? for legacy guests
db9adcbf
GD
81 * @ngroups: the number of virtqueue groups
82 * @nas: the number of address spaces
d8945ec4 83 * @use_va: indicate whether virtual address must be used by this device
476c135e 84 * @nvqs: maximum number of supported virtqueues
903f7bca
PP
85 * @mdev: management device pointer; caller must setup when registering device as part
86 * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
961e9c84
JW
87 */
88struct vdpa_device {
89 struct device dev;
90 struct device *dma_dev;
539fec78 91 const char *driver_override;
961e9c84 92 const struct vdpa_config_ops *config;
a6a51adc 93 struct rw_semaphore cf_lock; /* Protects get/set config */
961e9c84 94 unsigned int index;
452639a6 95 bool features_valid;
d8945ec4 96 bool use_va;
81d46d69 97 u32 nvqs;
903f7bca 98 struct vdpa_mgmt_dev *mdev;
d4821902 99 unsigned int ngroups;
db9adcbf 100 unsigned int nas;
961e9c84
JW
101};
102
3f1b623a 103/**
d0f9164e 104 * struct vdpa_iova_range - the IOVA range support by the device
3f1b623a
JW
105 * @first: start of the IOVA range
106 * @last: end of the IOVA range
107 */
108struct vdpa_iova_range {
109 u64 first;
110 u64 last;
111};
112
d8ca2fa5 113struct vdpa_dev_set_config {
90fea5a8 114 u64 device_features;
d8ca2fa5
PP
115 struct {
116 u8 mac[ETH_ALEN];
117 u16 mtu;
aba21aff 118 u16 max_vq_pairs;
d8ca2fa5
PP
119 } net;
120 u64 mask;
121};
122
d8945ec4 123/**
9be5d2d4 124 * struct vdpa_map_file - file area for device memory mapping
d8945ec4
XY
125 * @file: vma->vm_file for the mapping
126 * @offset: mapping offset in the vm_file
127 */
128struct vdpa_map_file {
129 struct file *file;
130 u64 offset;
131};
132
961e9c84 133/**
d0f9164e 134 * struct vdpa_config_ops - operations for configuring a vDPA device.
961e9c84
JW
135 * Note: vDPA device drivers are required to implement all of the
136 * operations unless it is mentioned to be optional in the following
137 * list.
138 *
139 * @set_vq_address: Set the address of virtqueue
140 * @vdev: vdpa device
141 * @idx: virtqueue index
142 * @desc_area: address of desc area
143 * @driver_area: address of driver area
144 * @device_area: address of device area
145 * Returns integer: success (0) or error (< 0)
146 * @set_vq_num: Set the size of virtqueue
147 * @vdev: vdpa device
148 * @idx: virtqueue index
149 * @num: the size of virtqueue
150 * @kick_vq: Kick the virtqueue
151 * @vdev: vdpa device
152 * @idx: virtqueue index
2c4e4a22
AK
153 * @kick_vq_with_data: Kick the virtqueue and supply extra data
154 * (only if VIRTIO_F_NOTIFICATION_DATA is negotiated)
155 * @vdev: vdpa device
156 * @data for split virtqueue:
157 * 16 bits vqn and 16 bits next available index.
158 * @data for packed virtqueue:
159 * 16 bits vqn, 15 least significant bits of
160 * next available index and 1 bit next_wrap.
961e9c84
JW
161 * @set_vq_cb: Set the interrupt callback function for
162 * a virtqueue
163 * @vdev: vdpa device
164 * @idx: virtqueue index
165 * @cb: virtio-vdev interrupt callback structure
166 * @set_vq_ready: Set ready status for a virtqueue
167 * @vdev: vdpa device
168 * @idx: virtqueue index
169 * @ready: ready (true) not ready(false)
170 * @get_vq_ready: Get ready status for a virtqueue
171 * @vdev: vdpa device
172 * @idx: virtqueue index
173 * Returns boolean: ready (true) or not (false)
174 * @set_vq_state: Set the state for a virtqueue
175 * @vdev: vdpa device
176 * @idx: virtqueue index
aac50c0b 177 * @state: pointer to set virtqueue state (last_avail_idx)
961e9c84
JW
178 * Returns integer: success (0) or error (< 0)
179 * @get_vq_state: Get the state for a virtqueue
180 * @vdev: vdpa device
181 * @idx: virtqueue index
aac50c0b 182 * @state: pointer to returned state (last_avail_idx)
9be5d2d4
SH
183 * @get_vendor_vq_stats: Get the vendor statistics of a device.
184 * @vdev: vdpa device
185 * @idx: virtqueue index
186 * @msg: socket buffer holding stats message
187 * @extack: extack for reporting error messages
188 * Returns integer: success (0) or error (< 0)
28cc408b 189 * @get_vq_notification: Get the notification area for a virtqueue (optional)
c25a26e6
JW
190 * @vdev: vdpa device
191 * @idx: virtqueue index
9be5d2d4 192 * Returns the notification area
7164675a
ZL
193 * @get_vq_irq: Get the irq number of a virtqueue (optional,
194 * but must implemented if require vq irq offloading)
195 * @vdev: vdpa device
196 * @idx: virtqueue index
197 * Returns int: irq number of a virtqueue,
198 * negative number if no irq assigned.
0a926fc9
ZL
199 * @get_vq_size: Get the size of a specific virtqueue (optional)
200 * @vdev: vdpa device
201 * @idx: virtqueue index
202 * Return u16: the size of the virtqueue
961e9c84
JW
203 * @get_vq_align: Get the virtqueue align requirement
204 * for the device
205 * @vdev: vdpa device
206 * Returns virtqueue algin requirement
00d1f546
JW
207 * @get_vq_group: Get the group id for a specific
208 * virtqueue (optional)
d4821902
GD
209 * @vdev: vdpa device
210 * @idx: virtqueue index
211 * Returns u32: group id for this virtqueue
a72cac60
SWL
212 * @get_vq_desc_group: Get the group id for the descriptor table of
213 * a specific virtqueue (optional)
214 * @vdev: vdpa device
215 * @idx: virtqueue index
216 * Returns u32: group id for the descriptor table
217 * portion of this virtqueue. Could be different
218 * than the one from @get_vq_group, in which case
219 * the access to the descriptor table can be
220 * confined to a separate asid, isolating from
221 * the virtqueue's buffer address access.
a64917bc 222 * @get_device_features: Get virtio features supported by the device
961e9c84
JW
223 * @vdev: vdpa device
224 * Returns the virtio features support by the
225 * device
b63e5c70
EP
226 * @get_backend_features: Get parent-specific backend features (optional)
227 * Returns the vdpa features supported by the
228 * device.
a64917bc 229 * @set_driver_features: Set virtio features supported by the driver
961e9c84
JW
230 * @vdev: vdpa device
231 * @features: feature support by the driver
232 * Returns integer: success (0) or error (< 0)
a64917bc
EC
233 * @get_driver_features: Get the virtio driver features in action
234 * @vdev: vdpa device
235 * Returns the virtio features accepted
961e9c84
JW
236 * @set_config_cb: Set the config interrupt callback
237 * @vdev: vdpa device
238 * @cb: virtio-vdev interrupt callback structure
239 * @get_vq_num_max: Get the max size of virtqueue
240 * @vdev: vdpa device
241 * Returns u16: max size of virtqueue
3b970a58
WZ
242 * @get_vq_num_min: Get the min size of virtqueue (optional)
243 * @vdev: vdpa device
244 * Returns u16: min size of virtqueue
961e9c84
JW
245 * @get_device_id: Get virtio device id
246 * @vdev: vdpa device
247 * Returns u32: virtio device id
248 * @get_vendor_id: Get id for the vendor that provides this device
249 * @vdev: vdpa device
250 * Returns u32: virtio vendor id
251 * @get_status: Get the device status
252 * @vdev: vdpa device
253 * Returns u8: virtio device status
254 * @set_status: Set the device status
255 * @vdev: vdpa device
256 * @status: virtio device status
0686082d
XY
257 * @reset: Reset device
258 * @vdev: vdpa device
259 * Returns integer: success (0) or error (< 0)
a26f2e4e
SWL
260 * @compat_reset: Reset device with compatibility quirks to
261 * accommodate older userspace. Only needed by
262 * parent driver which used to have bogus reset
263 * behaviour, and has to maintain such behaviour
264 * for compatibility with older userspace.
265 * Historically compliant driver only has to
266 * implement .reset, Historically non-compliant
267 * driver should implement both.
268 * @vdev: vdpa device
269 * @flags: compatibility quirks for reset
270 * Returns integer: success (0) or error (< 0)
1538a8a4
SB
271 * @suspend: Suspend the device (optional)
272 * @vdev: vdpa device
273 * Returns integer: success (0) or error (< 0)
274 * @resume: Resume the device (optional)
848ecea1
EP
275 * @vdev: vdpa device
276 * Returns integer: success (0) or error (< 0)
a61280dd
L
277 * @get_config_size: Get the size of the configuration space includes
278 * fields that are conditional on feature bits.
442706f9
SG
279 * @vdev: vdpa device
280 * Returns size_t: configuration size
961e9c84
JW
281 * @get_config: Read from device specific configuration space
282 * @vdev: vdpa device
283 * @offset: offset from the beginning of
284 * configuration space
285 * @buf: buffer used to read to
286 * @len: the length to read from
287 * configuration space
288 * @set_config: Write to device specific configuration space
289 * @vdev: vdpa device
290 * @offset: offset from the beginning of
291 * configuration space
292 * @buf: buffer used to write from
293 * @len: the length to write to
294 * configuration space
295 * @get_generation: Get device config generation (optional)
296 * @vdev: vdpa device
297 * Returns u32: device generation
3f1b623a
JW
298 * @get_iova_range: Get supported iova range (optional)
299 * @vdev: vdpa device
300 * Returns the iova range supported by
301 * the device.
1d246927
XY
302 * @set_vq_affinity: Set the affinity of virtqueue (optional)
303 * @vdev: vdpa device
304 * @idx: virtqueue index
305 * @cpu_mask: the affinity mask
306 * Returns integer: success (0) or error (< 0)
307 * @get_vq_affinity: Get the affinity of virtqueue (optional)
308 * @vdev: vdpa device
309 * @idx: virtqueue index
310 * Returns the affinity mask
46d554b1 311 * @set_group_asid: Set address space identifier for a
00d1f546 312 * virtqueue group (optional)
46d554b1
GD
313 * @vdev: vdpa device
314 * @group: virtqueue group
315 * @asid: address space id for this group
316 * Returns integer: success (0) or error (< 0)
961e9c84
JW
317 * @set_map: Set device memory mapping (optional)
318 * Needed for device that using device
319 * specific DMA translation (on-chip IOMMU)
320 * @vdev: vdpa device
db9adcbf 321 * @asid: address space identifier
961e9c84
JW
322 * @iotlb: vhost memory mapping to be
323 * used by the vDPA
324 * Returns integer: success (0) or error (< 0)
325 * @dma_map: Map an area of PA to IOVA (optional)
326 * Needed for device that using device
327 * specific DMA translation (on-chip IOMMU)
328 * and preferring incremental map.
329 * @vdev: vdpa device
db9adcbf 330 * @asid: address space identifier
961e9c84
JW
331 * @iova: iova to be mapped
332 * @size: size of the area
333 * @pa: physical address for the map
334 * @perm: device access permission (VHOST_MAP_XX)
335 * Returns integer: success (0) or error (< 0)
336 * @dma_unmap: Unmap an area of IOVA (optional but
337 * must be implemented with dma_map)
338 * Needed for device that using device
339 * specific DMA translation (on-chip IOMMU)
340 * and preferring incremental unmap.
341 * @vdev: vdpa device
db9adcbf 342 * @asid: address space identifier
961e9c84
JW
343 * @iova: iova to be unmapped
344 * @size: size of the area
345 * Returns integer: success (0) or error (< 0)
d2cf1b6e
SWL
346 * @reset_map: Reset device memory mapping to the default
347 * state (optional)
348 * Needed for devices that are using device
349 * specific DMA translation and prefer mapping
350 * to be decoupled from the virtio life cycle,
351 * i.e. device .reset op does not reset mapping
352 * @vdev: vdpa device
353 * @asid: address space identifier
354 * Returns integer: success (0) or error (< 0)
25da258f
JW
355 * @get_vq_dma_dev: Get the dma device for a specific
356 * virtqueue (optional)
357 * @vdev: vdpa device
358 * @idx: virtqueue index
359 * Returns pointer to structure device or error (NULL)
c618c84d
SG
360 * @bind_mm: Bind the device to a specific address space
361 * so the vDPA framework can use VA when this
362 * callback is implemented. (optional)
363 * @vdev: vdpa device
364 * @mm: address space to bind
365 * @unbind_mm: Unbind the device from the address space
366 * bound using the bind_mm callback. (optional)
367 * @vdev: vdpa device
961e9c84
JW
368 * @free: Free resources that belongs to vDPA (optional)
369 * @vdev: vdpa device
370 */
371struct vdpa_config_ops {
372 /* Virtqueue ops */
373 int (*set_vq_address)(struct vdpa_device *vdev,
374 u16 idx, u64 desc_area, u64 driver_area,
375 u64 device_area);
376 void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
377 void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
2c4e4a22 378 void (*kick_vq_with_data)(struct vdpa_device *vdev, u32 data);
961e9c84
JW
379 void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
380 struct vdpa_callback *cb);
381 void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
382 bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
aac50c0b
EC
383 int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
384 const struct vdpa_vq_state *state);
23750e39
EC
385 int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
386 struct vdpa_vq_state *state);
13b00b13
EC
387 int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
388 struct sk_buff *msg,
389 struct netlink_ext_ack *extack);
c25a26e6
JW
390 struct vdpa_notification_area
391 (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
4c05433b 392 /* vq irq is not expected to be changed once DRIVER_OK is set */
d0ae1fbf 393 int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
0a926fc9 394 u16 (*get_vq_size)(struct vdpa_device *vdev, u16 idx);
961e9c84
JW
395
396 /* Device ops */
425a5070 397 u32 (*get_vq_align)(struct vdpa_device *vdev);
d4821902 398 u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
a72cac60 399 u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx);
a64917bc 400 u64 (*get_device_features)(struct vdpa_device *vdev);
b63e5c70 401 u64 (*get_backend_features)(const struct vdpa_device *vdev);
a64917bc
EC
402 int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
403 u64 (*get_driver_features)(struct vdpa_device *vdev);
961e9c84
JW
404 void (*set_config_cb)(struct vdpa_device *vdev,
405 struct vdpa_callback *cb);
406 u16 (*get_vq_num_max)(struct vdpa_device *vdev);
3b970a58 407 u16 (*get_vq_num_min)(struct vdpa_device *vdev);
961e9c84
JW
408 u32 (*get_device_id)(struct vdpa_device *vdev);
409 u32 (*get_vendor_id)(struct vdpa_device *vdev);
410 u8 (*get_status)(struct vdpa_device *vdev);
411 void (*set_status)(struct vdpa_device *vdev, u8 status);
0686082d 412 int (*reset)(struct vdpa_device *vdev);
a26f2e4e
SWL
413 int (*compat_reset)(struct vdpa_device *vdev, u32 flags);
414#define VDPA_RESET_F_CLEAN_MAP 1
848ecea1 415 int (*suspend)(struct vdpa_device *vdev);
1538a8a4 416 int (*resume)(struct vdpa_device *vdev);
442706f9 417 size_t (*get_config_size)(struct vdpa_device *vdev);
961e9c84
JW
418 void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
419 void *buf, unsigned int len);
420 void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
421 const void *buf, unsigned int len);
422 u32 (*get_generation)(struct vdpa_device *vdev);
3f1b623a 423 struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
1d246927
XY
424 int (*set_vq_affinity)(struct vdpa_device *vdev, u16 idx,
425 const struct cpumask *cpu_mask);
426 const struct cpumask *(*get_vq_affinity)(struct vdpa_device *vdev,
427 u16 idx);
961e9c84
JW
428
429 /* DMA ops */
db9adcbf
GD
430 int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
431 struct vhost_iotlb *iotlb);
432 int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
433 u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
434 int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
435 u64 iova, u64 size);
d2cf1b6e 436 int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
46d554b1
GD
437 int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
438 unsigned int asid);
25da258f 439 struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
c618c84d
SG
440 int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm);
441 void (*unbind_mm)(struct vdpa_device *vdev);
961e9c84
JW
442
443 /* Free device resources */
444 void (*free)(struct vdpa_device *vdev);
445};
446
447struct vdpa_device *__vdpa_alloc_device(struct device *parent,
448 const struct vdpa_config_ops *config,
db9adcbf 449 unsigned int ngroups, unsigned int nas,
d8945ec4
XY
450 size_t size, const char *name,
451 bool use_va);
961e9c84 452
c8d182bd
XY
/**
 * vdpa_alloc_device - allocate and initialize a vDPA device
 *
 * @dev_struct: the type of the parent structure
 * @member: the name of struct vdpa_device within the @dev_struct
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @ngroups: the number of virtqueue groups supported by this device
 * @nas: the number of address spaces
 * @name: name of the vdpa device
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Return allocated data structure or ERR_PTR upon error
 */
#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
			  name, use_va) \
			  container_of((__vdpa_alloc_device( \
				       parent, config, ngroups, nas, \
				       (sizeof(dev_struct) + \
				       BUILD_BUG_ON_ZERO(offsetof( \
				       dev_struct, member))), name, use_va)), \
				       dev_struct, member)
475
81d46d69 476int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
961e9c84
JW
477void vdpa_unregister_device(struct vdpa_device *vdev);
478
81d46d69 479int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
903f7bca
PP
480void _vdpa_unregister_device(struct vdpa_device *vdev);
481
961e9c84 482/**
d0f9164e 483 * struct vdpa_driver - operations for a vDPA driver
961e9c84
JW
484 * @driver: underlying device driver
485 * @probe: the function to call when a device is found. Returns 0 or -errno.
486 * @remove: the function to call when a device is removed.
487 */
488struct vdpa_driver {
489 struct device_driver driver;
490 int (*probe)(struct vdpa_device *vdev);
491 void (*remove)(struct vdpa_device *vdev);
492};
493
/* Register a vDPA driver on behalf of the calling module. */
#define vdpa_register_driver(drv) \
	__vdpa_register_driver(drv, THIS_MODULE)
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
void vdpa_unregister_driver(struct vdpa_driver *drv);

/* Boilerplate module init/exit for drivers that only (un)register. */
#define module_vdpa_driver(__vdpa_driver) \
	module_driver(__vdpa_driver, vdpa_register_driver, \
		      vdpa_unregister_driver)
502
503static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
504{
505 return container_of(driver, struct vdpa_driver, driver);
506}
507
508static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
509{
510 return container_of(_dev, struct vdpa_device, dev);
511}
512
513static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev)
514{
515 return dev_get_drvdata(&vdev->dev);
516}
517
518static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
519{
520 dev_set_drvdata(&vdev->dev, data);
521}
522
523static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
524{
525 return vdev->dma_dev;
526}
452639a6 527
bc91df5c 528static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
452639a6 529{
86e17a51 530 const struct vdpa_config_ops *ops = vdev->config;
aba21aff 531 int ret;
452639a6 532
a6a51adc 533 down_write(&vdev->cf_lock);
452639a6 534 vdev->features_valid = false;
bc91df5c
SWL
535 if (ops->compat_reset && flags)
536 ret = ops->compat_reset(vdev, flags);
537 else
538 ret = ops->reset(vdev);
a6a51adc 539 up_write(&vdev->cf_lock);
aba21aff 540 return ret;
452639a6
MT
541}
542
e0077cc1 543static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
452639a6 544{
86e17a51 545 const struct vdpa_config_ops *ops = vdev->config;
aba21aff
EC
546 int ret;
547
452639a6 548 vdev->features_valid = true;
aba21aff 549 ret = ops->set_driver_features(vdev, features);
e0077cc1
SWL
550
551 return ret;
552}
553
554static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
555{
556 int ret;
557
a6a51adc 558 down_write(&vdev->cf_lock);
e0077cc1 559 ret = vdpa_set_features_unlocked(vdev, features);
a6a51adc 560 up_write(&vdev->cf_lock);
aba21aff
EC
561
562 return ret;
452639a6
MT
563}
564
6dbb1f16
PP
565void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
566 void *buf, unsigned int len);
567void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
568 const void *buf, unsigned int length);
73bc0dbb
EC
569void vdpa_set_status(struct vdpa_device *vdev, u8 status);
570
/**
 * struct vdpa_mgmtdev_ops - vdpa device ops
 * @dev_add: Add a vdpa device using alloc and register
 *	     @mdev: parent device to use for device addition
 *	     @name: name of the new vdpa device
 *	     @config: config attributes to apply to the device under creation
 *	     Drivers need to add a new device using _vdpa_register_device()
 *	     after fully initializing the vdpa device. Drivers must return 0
 *	     on success or appropriate error code.
 * @dev_del: Remove a vdpa device using unregister
 *	     @mdev: parent device to use for device removal
 *	     @dev: vdpa device to remove
 *	     Drivers need to remove the specified device by calling
 *	     _vdpa_unregister_device().
 */
struct vdpa_mgmtdev_ops {
	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
		       const struct vdpa_dev_set_config *config);
	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
};
591
960deb33
PP
592/**
593 * struct vdpa_mgmt_dev - vdpa management device
594 * @device: Management parent device
595 * @ops: operations supported by management device
596 * @id_table: Pointer to device id table of supported ids
d8ca2fa5
PP
597 * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that
598 * management device support during dev_add callback
960deb33 599 * @list: list entry
9be5d2d4
SH
600 * @supported_features: features supported by device
601 * @max_supported_vqs: maximum number of virtqueues supported by device
960deb33 602 */
33b34750
PP
603struct vdpa_mgmt_dev {
604 struct device *device;
605 const struct vdpa_mgmtdev_ops *ops;
ffbda8e9 606 struct virtio_device_id *id_table;
d8ca2fa5 607 u64 config_attr_mask;
33b34750 608 struct list_head list;
cd2629f6
EC
609 u64 supported_features;
610 u32 max_supported_vqs;
33b34750
PP
611};
612
613int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
614void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
615
961e9c84 616#endif /* _LINUX_VDPA_H */