1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_VIRTIO_PCI_MODERN_H
3 #define _LINUX_VIRTIO_PCI_MODERN_H
6 #include <linux/virtio_pci.h>
8 struct virtio_pci_modern_common_cfg {
9 struct virtio_pci_common_cfg cfg;
11 __le16 queue_notify_data; /* read-write */
12 __le16 queue_reset; /* read-write */
15 struct virtio_pci_modern_device {
16 struct pci_dev *pci_dev;
18 struct virtio_pci_common_cfg __iomem *common;
19 /* Device-specific data (non-legacy mode) */
21 /* Base of vq notifications (non-legacy mode). */
22 void __iomem *notify_base;
23 /* Physical base of vq notifications */
24 resource_size_t notify_pa;
25 /* Where to read and clear interrupt */
28 /* So we can sanity-check accesses. */
32 /* Capability for when we need to map notifications per-vq. */
35 /* Multiply queue_notify_off by this value. (non-legacy mode). */
36 u32 notify_offset_multiplier;
40 struct virtio_device_id id;
/*
 * Type-safe wrappers for io accesses.
 * Use these to enforce at compile time the following spec requirement:
 *
 * The driver MUST access each field using the "natural" access
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
51 static inline u8 vp_ioread8(const u8 __iomem *addr)
55 static inline u16 vp_ioread16 (const __le16 __iomem *addr)
57 return ioread16(addr);
60 static inline u32 vp_ioread32(const __le32 __iomem *addr)
62 return ioread32(addr);
65 static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
67 iowrite8(value, addr);
70 static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
72 iowrite16(value, addr);
75 static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
77 iowrite32(value, addr);
80 static inline void vp_iowrite64_twopart(u64 val,
84 vp_iowrite32((u32)val, lo);
85 vp_iowrite32(val >> 32, hi);
88 u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
89 u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev);
90 void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
92 u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
93 u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
94 void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
96 u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
98 u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
100 void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
101 u16 index, u64 desc_addr, u64 driver_addr,
103 void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
104 u16 idx, bool enable);
105 bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
107 void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
109 u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
111 u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
112 void __iomem * vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
113 u16 index, resource_size_t *pa);
114 int vp_modern_probe(struct virtio_pci_modern_device *mdev);
115 void vp_modern_remove(struct virtio_pci_modern_device *mdev);
116 int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
117 void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);