1 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VDPA_H
#define _LINUX_VDPA_H

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vhost_iotlb.h>
#include <linux/virtio_net.h>
#include <linux/if_ether.h>
13 * struct vdpa_callback - vDPA callback definition.
14 * @callback: interrupt callback function
15 * @private: the data passed to the callback function
16 * @trigger: the eventfd for the callback (Optional).
17 * When it is set, the vDPA driver must guarantee that
18 * signaling it is functional equivalent to triggering
19 * the callback. Then vDPA parent can signal it directly
20 * instead of triggering the callback.
22 struct vdpa_callback {
23 irqreturn_t (*callback)(void *data);
25 struct eventfd_ctx *trigger;
29 * struct vdpa_notification_area - vDPA notification area
30 * @addr: base address of the notification area
31 * @size: size of the notification area
33 struct vdpa_notification_area {
39 * struct vdpa_vq_state_split - vDPA split virtqueue state
40 * @avail_index: available index
42 struct vdpa_vq_state_split {
47 * struct vdpa_vq_state_packed - vDPA packed virtqueue state
48 * @last_avail_counter: last driver ring wrap counter observed by device
49 * @last_avail_idx: device available index
50 * @last_used_counter: device ring wrap counter
51 * @last_used_idx: used index
53 struct vdpa_vq_state_packed {
54 u16 last_avail_counter:1;
55 u16 last_avail_idx:15;
56 u16 last_used_counter:1;
60 struct vdpa_vq_state {
62 struct vdpa_vq_state_split split;
63 struct vdpa_vq_state_packed packed;
70 * struct vdpa_device - representation of a vDPA device
71 * @dev: underlying device
72 * @dma_dev: the actual device that is performing DMA
73 * @driver_override: driver name to force a match; do not set directly,
74 * because core frees it; use driver_set_override() to
76 * @config: the configuration ops for this device.
77 * @cf_lock: Protects get and set access to configuration layout.
78 * @index: device index
79 * @features_valid: were features initialized? for legacy guests
80 * @ngroups: the number of virtqueue groups
81 * @nas: the number of address spaces
82 * @use_va: indicate whether virtual address must be used by this device
83 * @nvqs: maximum number of supported virtqueues
84 * @mdev: management device pointer; caller must setup when registering device as part
85 * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
89 struct device *dma_dev;
90 const char *driver_override;
91 const struct vdpa_config_ops *config;
92 struct rw_semaphore cf_lock; /* Protects get/set config */
97 struct vdpa_mgmt_dev *mdev;
103 * struct vdpa_iova_range - the IOVA range support by the device
104 * @first: start of the IOVA range
105 * @last: end of the IOVA range
107 struct vdpa_iova_range {
112 struct vdpa_dev_set_config {
123 * struct vdpa_map_file - file area for device memory mapping
124 * @file: vma->vm_file for the mapping
125 * @offset: mapping offset in the vm_file
127 struct vdpa_map_file {
/**
 * struct vdpa_config_ops - operations for configuring a vDPA device.
134 * Note: vDPA device drivers are required to implement all of the
135 * operations unless it is mentioned to be optional in the following
138 * @set_vq_address: Set the address of virtqueue
140 * @idx: virtqueue index
141 * @desc_area: address of desc area
142 * @driver_area: address of driver area
143 * @device_area: address of device area
144 * Returns integer: success (0) or error (< 0)
145 * @set_vq_num: Set the size of virtqueue
147 * @idx: virtqueue index
148 * @num: the size of virtqueue
149 * @kick_vq: Kick the virtqueue
151 * @idx: virtqueue index
152 * @kick_vq_with_data: Kick the virtqueue and supply extra data
153 * (only if VIRTIO_F_NOTIFICATION_DATA is negotiated)
155 * @data for split virtqueue:
156 * 16 bits vqn and 16 bits next available index.
157 * @data for packed virtqueue:
158 * 16 bits vqn, 15 least significant bits of
159 * next available index and 1 bit next_wrap.
160 * @set_vq_cb: Set the interrupt callback function for
163 * @idx: virtqueue index
164 * @cb: virtio-vdev interrupt callback structure
165 * @set_vq_ready: Set ready status for a virtqueue
167 * @idx: virtqueue index
168 * @ready: ready (true) not ready(false)
169 * @get_vq_ready: Get ready status for a virtqueue
171 * @idx: virtqueue index
172 * Returns boolean: ready (true) or not (false)
173 * @set_vq_state: Set the state for a virtqueue
175 * @idx: virtqueue index
176 * @state: pointer to set virtqueue state (last_avail_idx)
177 * Returns integer: success (0) or error (< 0)
178 * @get_vq_state: Get the state for a virtqueue
180 * @idx: virtqueue index
181 * @state: pointer to returned state (last_avail_idx)
182 * @get_vendor_vq_stats: Get the vendor statistics of a device.
184 * @idx: virtqueue index
185 * @msg: socket buffer holding stats message
186 * @extack: extack for reporting error messages
187 * Returns integer: success (0) or error (< 0)
188 * @get_vq_notification: Get the notification area for a virtqueue (optional)
190 * @idx: virtqueue index
191 * Returns the notification area
192 * @get_vq_irq: Get the irq number of a virtqueue (optional,
 * but must be implemented if vq irq offloading is required)
195 * @idx: virtqueue index
196 * Returns int: irq number of a virtqueue,
197 * negative number if no irq assigned.
198 * @get_vq_align: Get the virtqueue align requirement
 * Returns virtqueue align requirement
202 * @get_vq_group: Get the group id for a specific
203 * virtqueue (optional)
205 * @idx: virtqueue index
206 * Returns u32: group id for this virtqueue
207 * @get_device_features: Get virtio features supported by the device
 * Returns the virtio features supported by the
211 * @set_driver_features: Set virtio features supported by the driver
 * @features: features supported by the driver
214 * Returns integer: success (0) or error (< 0)
215 * @get_driver_features: Get the virtio driver features in action
217 * Returns the virtio features accepted
218 * @set_config_cb: Set the config interrupt callback
220 * @cb: virtio-vdev interrupt callback structure
221 * @get_vq_num_max: Get the max size of virtqueue
223 * Returns u16: max size of virtqueue
224 * @get_vq_num_min: Get the min size of virtqueue (optional)
226 * Returns u16: min size of virtqueue
227 * @get_device_id: Get virtio device id
229 * Returns u32: virtio device id
230 * @get_vendor_id: Get id for the vendor that provides this device
232 * Returns u32: virtio vendor id
233 * @get_status: Get the device status
235 * Returns u8: virtio device status
236 * @set_status: Set the device status
238 * @status: virtio device status
239 * @reset: Reset device
241 * Returns integer: success (0) or error (< 0)
242 * @suspend: Suspend the device (optional)
244 * Returns integer: success (0) or error (< 0)
245 * @resume: Resume the device (optional)
247 * Returns integer: success (0) or error (< 0)
 * @get_config_size: Get the size of the configuration space, including
 *                   fields that are conditional on feature bits.
251 * Returns size_t: configuration size
252 * @get_config: Read from device specific configuration space
254 * @offset: offset from the beginning of
255 * configuration space
256 * @buf: buffer used to read to
257 * @len: the length to read from
258 * configuration space
259 * @set_config: Write to device specific configuration space
261 * @offset: offset from the beginning of
262 * configuration space
263 * @buf: buffer used to write from
264 * @len: the length to write to
265 * configuration space
266 * @get_generation: Get device config generation (optional)
268 * Returns u32: device generation
269 * @get_iova_range: Get supported iova range (optional)
271 * Returns the iova range supported by
273 * @set_vq_affinity: Set the affinity of virtqueue (optional)
275 * @idx: virtqueue index
276 * @cpu_mask: the affinity mask
277 * Returns integer: success (0) or error (< 0)
278 * @get_vq_affinity: Get the affinity of virtqueue (optional)
280 * @idx: virtqueue index
281 * Returns the affinity mask
282 * @set_group_asid: Set address space identifier for a
283 * virtqueue group (optional)
285 * @group: virtqueue group
286 * @asid: address space id for this group
287 * Returns integer: success (0) or error (< 0)
288 * @set_map: Set device memory mapping (optional)
 * Needed for devices that use device-specific
 * DMA translation (on-chip IOMMU)
292 * @asid: address space identifier
293 * @iotlb: vhost memory mapping to be
295 * Returns integer: success (0) or error (< 0)
296 * @dma_map: Map an area of PA to IOVA (optional)
 * Needed for devices that use device-specific
 * DMA translation (on-chip IOMMU)
 * and prefer incremental map.
301 * @asid: address space identifier
302 * @iova: iova to be mapped
303 * @size: size of the area
304 * @pa: physical address for the map
305 * @perm: device access permission (VHOST_MAP_XX)
306 * Returns integer: success (0) or error (< 0)
307 * @dma_unmap: Unmap an area of IOVA (optional but
308 * must be implemented with dma_map)
 * Needed for devices that use device-specific
 * DMA translation (on-chip IOMMU)
 * and prefer incremental unmap.
313 * @asid: address space identifier
314 * @iova: iova to be unmapped
315 * @size: size of the area
316 * Returns integer: success (0) or error (< 0)
317 * @get_vq_dma_dev: Get the dma device for a specific
318 * virtqueue (optional)
320 * @idx: virtqueue index
321 * Returns pointer to structure device or error (NULL)
322 * @bind_mm: Bind the device to a specific address space
323 * so the vDPA framework can use VA when this
324 * callback is implemented. (optional)
326 * @mm: address space to bind
327 * @unbind_mm: Unbind the device from the address space
328 * bound using the bind_mm callback. (optional)
 * @free: Free resources that belong to vDPA (optional)
 */
333 struct vdpa_config_ops {
335 int (*set_vq_address)(struct vdpa_device *vdev,
336 u16 idx, u64 desc_area, u64 driver_area,
338 void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
339 void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
340 void (*kick_vq_with_data)(struct vdpa_device *vdev, u32 data);
341 void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
342 struct vdpa_callback *cb);
343 void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
344 bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
345 int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
346 const struct vdpa_vq_state *state);
347 int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
348 struct vdpa_vq_state *state);
349 int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
351 struct netlink_ext_ack *extack);
352 struct vdpa_notification_area
353 (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
354 /* vq irq is not expected to be changed once DRIVER_OK is set */
355 int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
358 u32 (*get_vq_align)(struct vdpa_device *vdev);
359 u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
360 u64 (*get_device_features)(struct vdpa_device *vdev);
361 int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
362 u64 (*get_driver_features)(struct vdpa_device *vdev);
363 void (*set_config_cb)(struct vdpa_device *vdev,
364 struct vdpa_callback *cb);
365 u16 (*get_vq_num_max)(struct vdpa_device *vdev);
366 u16 (*get_vq_num_min)(struct vdpa_device *vdev);
367 u32 (*get_device_id)(struct vdpa_device *vdev);
368 u32 (*get_vendor_id)(struct vdpa_device *vdev);
369 u8 (*get_status)(struct vdpa_device *vdev);
370 void (*set_status)(struct vdpa_device *vdev, u8 status);
371 int (*reset)(struct vdpa_device *vdev);
372 int (*suspend)(struct vdpa_device *vdev);
373 int (*resume)(struct vdpa_device *vdev);
374 size_t (*get_config_size)(struct vdpa_device *vdev);
375 void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
376 void *buf, unsigned int len);
377 void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
378 const void *buf, unsigned int len);
379 u32 (*get_generation)(struct vdpa_device *vdev);
380 struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
381 int (*set_vq_affinity)(struct vdpa_device *vdev, u16 idx,
382 const struct cpumask *cpu_mask);
383 const struct cpumask *(*get_vq_affinity)(struct vdpa_device *vdev,
387 int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
388 struct vhost_iotlb *iotlb);
389 int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
390 u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
391 int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
393 int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
395 struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
396 int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm);
397 void (*unbind_mm)(struct vdpa_device *vdev);
399 /* Free device resources */
400 void (*free)(struct vdpa_device *vdev);
403 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
404 const struct vdpa_config_ops *config,
405 unsigned int ngroups, unsigned int nas,
406 size_t size, const char *name,
/**
 * vdpa_alloc_device - allocate and initialize a vDPA device
 *
 * @dev_struct: the type of the parent structure
 * @member: the name of struct vdpa_device within the @dev_struct
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @ngroups: the number of virtqueue groups supported by this device
 * @nas: the number of address spaces
 * @name: name of the vdpa device
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Return allocated data structure or ERR_PTR upon error
 */
#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
			  name, use_va) \
	container_of((__vdpa_alloc_device( \
		      parent, config, ngroups, nas, \
		      (sizeof(dev_struct) + \
		       BUILD_BUG_ON_ZERO(offsetof( \
		       dev_struct, member))), name, use_va)), \
		     dev_struct, member)
/* Register / unregister a vDPA device with the vDPA bus. */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);
/* Underscored variants: intended for use from a management device's
 * dev_add()/dev_del() callbacks (see struct vdpa_mgmtdev_ops). */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void _vdpa_unregister_device(struct vdpa_device *vdev);
439 * struct vdpa_driver - operations for a vDPA driver
440 * @driver: underlying device driver
441 * @probe: the function to call when a device is found. Returns 0 or -errno.
442 * @remove: the function to call when a device is removed.
445 struct device_driver driver;
446 int (*probe)(struct vdpa_device *vdev);
447 void (*remove)(struct vdpa_device *vdev);
/* Register a vDPA driver on the bus, owned by the calling module. */
#define vdpa_register_driver(drv) \
	__vdpa_register_driver(drv, THIS_MODULE)
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
void vdpa_unregister_driver(struct vdpa_driver *drv);
/*
 * module_vdpa_driver() - boilerplate helper for modules whose init/exit
 * do nothing beyond registering/unregistering the vDPA driver.
 */
#define module_vdpa_driver(__vdpa_driver) \
	module_driver(__vdpa_driver, vdpa_register_driver, \
		      vdpa_unregister_driver)
459 static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
461 return container_of(driver, struct vdpa_driver, driver);
464 static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
466 return container_of(_dev, struct vdpa_device, dev);
469 static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev)
471 return dev_get_drvdata(&vdev->dev);
474 static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
476 dev_set_drvdata(&vdev->dev, data);
479 static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
481 return vdev->dma_dev;
484 static inline int vdpa_reset(struct vdpa_device *vdev)
486 const struct vdpa_config_ops *ops = vdev->config;
489 down_write(&vdev->cf_lock);
490 vdev->features_valid = false;
491 ret = ops->reset(vdev);
492 up_write(&vdev->cf_lock);
496 static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
498 const struct vdpa_config_ops *ops = vdev->config;
501 vdev->features_valid = true;
502 ret = ops->set_driver_features(vdev, features);
507 static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
511 down_write(&vdev->cf_lock);
512 ret = vdpa_set_features_unlocked(vdev, features);
513 up_write(&vdev->cf_lock);
/* Read from the device-specific configuration space. */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len);
/* Write to the device-specific configuration space. */
void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
		     const void *buf, unsigned int length);
/* Set the virtio device status byte. */
void vdpa_set_status(struct vdpa_device *vdev, u8 status);
/**
 * struct vdpa_mgmtdev_ops - vdpa device ops
 * @dev_add: Add a vdpa device using alloc and register
 *	     @mdev: parent device to use for device addition
 *	     @name: name of the new vdpa device
 *	     @config: config attributes to apply to the device under creation
 *	     Drivers need to add a new device using _vdpa_register_device()
 *	     after fully initializing the vdpa device. Drivers must return 0
 *	     on success or an appropriate error code.
 * @dev_del: Remove a vdpa device using unregister
 *	     @mdev: parent device to use for device removal
 *	     @dev: vdpa device to remove
 *	     Drivers need to remove the specified device by calling
 *	     _vdpa_unregister_device().
 */
struct vdpa_mgmtdev_ops {
	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
		       const struct vdpa_dev_set_config *config);
	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
};
546 * struct vdpa_mgmt_dev - vdpa management device
547 * @device: Management parent device
548 * @ops: operations supported by management device
549 * @id_table: Pointer to device id table of supported ids
550 * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that
551 * management device support during dev_add callback
553 * @supported_features: features supported by device
554 * @max_supported_vqs: maximum number of virtqueues supported by device
556 struct vdpa_mgmt_dev {
557 struct device *device;
558 const struct vdpa_mgmtdev_ops *ops;
559 struct virtio_device_id *id_table;
560 u64 config_attr_mask;
561 struct list_head list;
562 u64 supported_features;
563 u32 max_supported_vqs;
/* Register / unregister a management device with the vDPA core. */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
#endif /* _LINUX_VDPA_H */