Commit | Line | Data |
---|---|---|
961e9c84 JW |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_VDPA_H | |
3 | #define _LINUX_VDPA_H | |
4 | ||
5 | #include <linux/kernel.h> | |
6 | #include <linux/device.h> | |
7 | #include <linux/interrupt.h> | |
8 | #include <linux/vhost_iotlb.h> | |
d8ca2fa5 PP |
9 | #include <linux/virtio_net.h> |
10 | #include <linux/if_ether.h> | |
961e9c84 JW |
11 | |
12 | /** | |
9be5d2d4 | 13 | * struct vdpa_callback - vDPA callback definition. |
961e9c84 JW |
14 | * @callback: interrupt callback function |
15 | * @private: the data passed to the callback function | |
5e68470f XY |
16 | * @trigger: the eventfd for the callback (Optional). |
17 | * When it is set, the vDPA driver must guarantee that | |
18 | signaling it is functionally equivalent to triggering |
19 | * the callback. Then vDPA parent can signal it directly | |
20 | * instead of triggering the callback. | |
961e9c84 JW |
21 | */ |
22 | struct vdpa_callback { | |
23 | irqreturn_t (*callback)(void *data); | |
24 | void *private; | |
5e68470f | 25 | struct eventfd_ctx *trigger; |
961e9c84 JW |
26 | }; |
27 | ||
c25a26e6 | 28 | /** |
d0f9164e | 29 | * struct vdpa_notification_area - vDPA notification area |
c25a26e6 JW |
30 | * @addr: base address of the notification area |
31 | * @size: size of the notification area | |
32 | */ | |
33 | struct vdpa_notification_area { | |
34 | resource_size_t addr; | |
35 | resource_size_t size; | |
36 | }; | |
37 | ||
aac50c0b | 38 | /** |
530a5678 | 39 | * struct vdpa_vq_state_split - vDPA split virtqueue state |
aac50c0b EC |
40 | * @avail_index: available index |
41 | */ | |
530a5678 | 42 | struct vdpa_vq_state_split { |
aac50c0b EC |
43 | u16 avail_index; |
44 | }; | |
45 | ||
530a5678 JW |
46 | /** |
47 | * struct vdpa_vq_state_packed - vDPA packed virtqueue state | |
48 | * @last_avail_counter: last driver ring wrap counter observed by device | |
49 | * @last_avail_idx: device available index | |
50 | * @last_used_counter: device ring wrap counter | |
51 | * @last_used_idx: used index | |
52 | */ | |
53 | struct vdpa_vq_state_packed { | |
86e17a51 XY |
54 | u16 last_avail_counter:1; |
55 | u16 last_avail_idx:15; | |
56 | u16 last_used_counter:1; | |
57 | u16 last_used_idx:15; | |
530a5678 JW |
58 | }; |
59 | ||
60 | struct vdpa_vq_state { | |
86e17a51 XY |
61 | union { |
62 | struct vdpa_vq_state_split split; | |
63 | struct vdpa_vq_state_packed packed; | |
64 | }; | |
530a5678 JW |
65 | }; |
66 | ||
33b34750 PP |
67 | struct vdpa_mgmt_dev; |
68 | ||
961e9c84 | 69 | /** |
d0f9164e | 70 | * struct vdpa_device - representation of a vDPA device |
961e9c84 JW |
71 | * @dev: underlying device |
72 | * @dma_dev: the actual device that is performing DMA | |
240bf4e6 KK |
73 | * @driver_override: driver name to force a match; do not set directly, |
74 | * because core frees it; use driver_set_override() to | |
75 | * set or clear it. | |
961e9c84 | 76 | * @config: the configuration ops for this device. |
a6a51adc | 77 | * @cf_lock: Protects get and set access to configuration layout. |
961e9c84 | 78 | * @index: device index |
452639a6 | 79 | * @features_valid: were features initialized? for legacy guests |
db9adcbf GD |
80 | * @ngroups: the number of virtqueue groups |
81 | * @nas: the number of address spaces | |
d8945ec4 | 82 | * @use_va: indicate whether virtual address must be used by this device |
476c135e | 83 | * @nvqs: maximum number of supported virtqueues |
903f7bca PP |
84 | * @mdev: management device pointer; caller must setup when registering device as part |
85 | * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device(). | |
961e9c84 JW |
86 | */ |
87 | struct vdpa_device { | |
88 | struct device dev; | |
89 | struct device *dma_dev; | |
539fec78 | 90 | const char *driver_override; |
961e9c84 | 91 | const struct vdpa_config_ops *config; |
a6a51adc | 92 | struct rw_semaphore cf_lock; /* Protects get/set config */ |
961e9c84 | 93 | unsigned int index; |
452639a6 | 94 | bool features_valid; |
d8945ec4 | 95 | bool use_va; |
81d46d69 | 96 | u32 nvqs; |
903f7bca | 97 | struct vdpa_mgmt_dev *mdev; |
d4821902 | 98 | unsigned int ngroups; |
db9adcbf | 99 | unsigned int nas; |
961e9c84 JW |
100 | }; |
101 | ||
3f1b623a | 102 | /** |
d0f9164e | 103 | * struct vdpa_iova_range - the IOVA range support by the device |
3f1b623a JW |
104 | * @first: start of the IOVA range |
105 | * @last: end of the IOVA range | |
106 | */ | |
107 | struct vdpa_iova_range { | |
108 | u64 first; | |
109 | u64 last; | |
110 | }; | |
111 | ||
d8ca2fa5 | 112 | struct vdpa_dev_set_config { |
90fea5a8 | 113 | u64 device_features; |
d8ca2fa5 PP |
114 | struct { |
115 | u8 mac[ETH_ALEN]; | |
116 | u16 mtu; | |
aba21aff | 117 | u16 max_vq_pairs; |
d8ca2fa5 PP |
118 | } net; |
119 | u64 mask; | |
120 | }; | |
121 | ||
d8945ec4 | 122 | /** |
9be5d2d4 | 123 | * struct vdpa_map_file - file area for device memory mapping |
d8945ec4 XY |
124 | * @file: vma->vm_file for the mapping |
125 | * @offset: mapping offset in the vm_file | |
126 | */ | |
127 | struct vdpa_map_file { | |
128 | struct file *file; | |
129 | u64 offset; | |
130 | }; | |
131 | ||
961e9c84 | 132 | /** |
d0f9164e | 133 | * struct vdpa_config_ops - operations for configuring a vDPA device. |
961e9c84 JW |
134 | * Note: vDPA device drivers are required to implement all of the |
135 | * operations unless it is mentioned to be optional in the following | |
136 | * list. | |
137 | * | |
138 | * @set_vq_address: Set the address of virtqueue | |
139 | * @vdev: vdpa device | |
140 | * @idx: virtqueue index | |
141 | * @desc_area: address of desc area | |
142 | * @driver_area: address of driver area | |
143 | * @device_area: address of device area | |
144 | * Returns integer: success (0) or error (< 0) | |
145 | * @set_vq_num: Set the size of virtqueue | |
146 | * @vdev: vdpa device | |
147 | * @idx: virtqueue index | |
148 | * @num: the size of virtqueue | |
149 | * @kick_vq: Kick the virtqueue | |
150 | * @vdev: vdpa device | |
151 | * @idx: virtqueue index | |
2c4e4a22 AK |
152 | * @kick_vq_with_data: Kick the virtqueue and supply extra data |
153 | * (only if VIRTIO_F_NOTIFICATION_DATA is negotiated) | |
154 | * @vdev: vdpa device | |
155 | * @data for split virtqueue: | |
156 | * 16 bits vqn and 16 bits next available index. | |
157 | * @data for packed virtqueue: | |
158 | * 16 bits vqn, 15 least significant bits of | |
159 | * next available index and 1 bit next_wrap. | |
961e9c84 JW |
160 | * @set_vq_cb: Set the interrupt callback function for |
161 | * a virtqueue | |
162 | * @vdev: vdpa device | |
163 | * @idx: virtqueue index | |
164 | * @cb: virtio-vdev interrupt callback structure | |
165 | * @set_vq_ready: Set ready status for a virtqueue | |
166 | * @vdev: vdpa device | |
167 | * @idx: virtqueue index | |
168 | * @ready: ready (true) not ready(false) | |
169 | * @get_vq_ready: Get ready status for a virtqueue | |
170 | * @vdev: vdpa device | |
171 | * @idx: virtqueue index | |
172 | * Returns boolean: ready (true) or not (false) | |
173 | * @set_vq_state: Set the state for a virtqueue | |
174 | * @vdev: vdpa device | |
175 | * @idx: virtqueue index | |
aac50c0b | 176 | * @state: pointer to set virtqueue state (last_avail_idx) |
961e9c84 JW |
177 | * Returns integer: success (0) or error (< 0) |
178 | * @get_vq_state: Get the state for a virtqueue | |
179 | * @vdev: vdpa device | |
180 | * @idx: virtqueue index | |
aac50c0b | 181 | * @state: pointer to returned state (last_avail_idx) |
9be5d2d4 SH |
182 | * @get_vendor_vq_stats: Get the vendor statistics of a device. |
183 | * @vdev: vdpa device | |
184 | * @idx: virtqueue index | |
185 | * @msg: socket buffer holding stats message | |
186 | * @extack: extack for reporting error messages | |
187 | * Returns integer: success (0) or error (< 0) | |
28cc408b | 188 | * @get_vq_notification: Get the notification area for a virtqueue (optional) |
c25a26e6 JW |
189 | * @vdev: vdpa device |
190 | * @idx: virtqueue index | |
9be5d2d4 | 191 | * Returns the notification area |
7164675a ZL |
192 | * @get_vq_irq: Get the irq number of a virtqueue (optional, |
193 | but must be implemented if vq irq offloading is required) | |
194 | * @vdev: vdpa device | |
195 | * @idx: virtqueue index | |
196 | * Returns int: irq number of a virtqueue, | |
197 | * negative number if no irq assigned. | |
961e9c84 JW |
198 | * @get_vq_align: Get the virtqueue align requirement |
199 | * for the device | |
200 | * @vdev: vdpa device | |
201 | Returns virtqueue align requirement | |
00d1f546 JW |
202 | * @get_vq_group: Get the group id for a specific |
203 | * virtqueue (optional) | |
d4821902 GD |
204 | * @vdev: vdpa device |
205 | * @idx: virtqueue index | |
206 | * Returns u32: group id for this virtqueue | |
a64917bc | 207 | * @get_device_features: Get virtio features supported by the device |
961e9c84 JW |
208 | * @vdev: vdpa device |
209 | * Returns the virtio features support by the | |
210 | * device | |
a64917bc | 211 | * @set_driver_features: Set virtio features supported by the driver |
961e9c84 JW |
212 | * @vdev: vdpa device |
213 | * @features: feature support by the driver | |
214 | * Returns integer: success (0) or error (< 0) | |
a64917bc EC |
215 | * @get_driver_features: Get the virtio driver features in action |
216 | * @vdev: vdpa device | |
217 | * Returns the virtio features accepted | |
961e9c84 JW |
218 | * @set_config_cb: Set the config interrupt callback |
219 | * @vdev: vdpa device | |
220 | * @cb: virtio-vdev interrupt callback structure | |
221 | * @get_vq_num_max: Get the max size of virtqueue | |
222 | * @vdev: vdpa device | |
223 | * Returns u16: max size of virtqueue | |
3b970a58 WZ |
224 | * @get_vq_num_min: Get the min size of virtqueue (optional) |
225 | * @vdev: vdpa device | |
226 | * Returns u16: min size of virtqueue | |
961e9c84 JW |
227 | * @get_device_id: Get virtio device id |
228 | * @vdev: vdpa device | |
229 | * Returns u32: virtio device id | |
230 | * @get_vendor_id: Get id for the vendor that provides this device | |
231 | * @vdev: vdpa device | |
232 | * Returns u32: virtio vendor id | |
233 | * @get_status: Get the device status | |
234 | * @vdev: vdpa device | |
235 | * Returns u8: virtio device status | |
236 | * @set_status: Set the device status | |
237 | * @vdev: vdpa device | |
238 | * @status: virtio device status | |
0686082d XY |
239 | * @reset: Reset device |
240 | * @vdev: vdpa device | |
241 | * Returns integer: success (0) or error (< 0) | |
1538a8a4 SB |
242 | * @suspend: Suspend the device (optional) |
243 | * @vdev: vdpa device | |
244 | * Returns integer: success (0) or error (< 0) | |
245 | * @resume: Resume the device (optional) | |
848ecea1 EP |
246 | * @vdev: vdpa device |
247 | * Returns integer: success (0) or error (< 0) | |
a61280dd L |
248 | * @get_config_size: Get the size of the configuration space, including |
249 | * fields that are conditional on feature bits. | |
442706f9 SG |
250 | * @vdev: vdpa device |
251 | * Returns size_t: configuration size | |
961e9c84 JW |
252 | * @get_config: Read from device specific configuration space |
253 | * @vdev: vdpa device | |
254 | * @offset: offset from the beginning of | |
255 | * configuration space | |
256 | * @buf: buffer used to read to | |
257 | * @len: the length to read from | |
258 | * configuration space | |
259 | * @set_config: Write to device specific configuration space | |
260 | * @vdev: vdpa device | |
261 | * @offset: offset from the beginning of | |
262 | * configuration space | |
263 | * @buf: buffer used to write from | |
264 | * @len: the length to write to | |
265 | * configuration space | |
266 | * @get_generation: Get device config generation (optional) | |
267 | * @vdev: vdpa device | |
268 | * Returns u32: device generation | |
3f1b623a JW |
269 | * @get_iova_range: Get supported iova range (optional) |
270 | * @vdev: vdpa device | |
271 | * Returns the iova range supported by | |
272 | * the device. | |
1d246927 XY |
273 | * @set_vq_affinity: Set the affinity of virtqueue (optional) |
274 | * @vdev: vdpa device | |
275 | * @idx: virtqueue index | |
276 | * @cpu_mask: the affinity mask | |
277 | * Returns integer: success (0) or error (< 0) | |
278 | * @get_vq_affinity: Get the affinity of virtqueue (optional) | |
279 | * @vdev: vdpa device | |
280 | * @idx: virtqueue index | |
281 | * Returns the affinity mask | |
46d554b1 | 282 | * @set_group_asid: Set address space identifier for a |
00d1f546 | 283 | * virtqueue group (optional) |
46d554b1 GD |
284 | * @vdev: vdpa device |
285 | * @group: virtqueue group | |
286 | * @asid: address space id for this group | |
287 | * Returns integer: success (0) or error (< 0) | |
961e9c84 JW |
288 | * @set_map: Set device memory mapping (optional) |
289 | * Needed for device that using device | |
290 | * specific DMA translation (on-chip IOMMU) | |
291 | * @vdev: vdpa device | |
db9adcbf | 292 | * @asid: address space identifier |
961e9c84 JW |
293 | * @iotlb: vhost memory mapping to be |
294 | * used by the vDPA | |
295 | * Returns integer: success (0) or error (< 0) | |
296 | * @dma_map: Map an area of PA to IOVA (optional) | |
297 | * Needed for device that using device | |
298 | * specific DMA translation (on-chip IOMMU) | |
299 | * and preferring incremental map. | |
300 | * @vdev: vdpa device | |
db9adcbf | 301 | * @asid: address space identifier |
961e9c84 JW |
302 | * @iova: iova to be mapped |
303 | * @size: size of the area | |
304 | * @pa: physical address for the map | |
305 | * @perm: device access permission (VHOST_MAP_XX) | |
306 | * Returns integer: success (0) or error (< 0) | |
307 | * @dma_unmap: Unmap an area of IOVA (optional but | |
308 | * must be implemented with dma_map) | |
309 | * Needed for device that using device | |
310 | * specific DMA translation (on-chip IOMMU) | |
311 | * and preferring incremental unmap. | |
312 | * @vdev: vdpa device | |
db9adcbf | 313 | * @asid: address space identifier |
961e9c84 JW |
314 | * @iova: iova to be unmapped |
315 | * @size: size of the area | |
316 | * Returns integer: success (0) or error (< 0) | |
25da258f JW |
317 | * @get_vq_dma_dev: Get the dma device for a specific |
318 | * virtqueue (optional) | |
319 | * @vdev: vdpa device | |
320 | * @idx: virtqueue index | |
321 | * Returns pointer to structure device or error (NULL) | |
c618c84d SG |
322 | * @bind_mm: Bind the device to a specific address space |
323 | * so the vDPA framework can use VA when this | |
324 | * callback is implemented. (optional) | |
325 | * @vdev: vdpa device | |
326 | * @mm: address space to bind | |
327 | * @unbind_mm: Unbind the device from the address space | |
328 | * bound using the bind_mm callback. (optional) | |
329 | * @vdev: vdpa device | |
961e9c84 JW |
330 | * @free: Free resources that belongs to vDPA (optional) |
331 | * @vdev: vdpa device | |
332 | */ | |
333 | struct vdpa_config_ops { | |
334 | /* Virtqueue ops */ | |
335 | int (*set_vq_address)(struct vdpa_device *vdev, | |
336 | u16 idx, u64 desc_area, u64 driver_area, | |
337 | u64 device_area); | |
338 | void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num); | |
339 | void (*kick_vq)(struct vdpa_device *vdev, u16 idx); | |
2c4e4a22 | 340 | void (*kick_vq_with_data)(struct vdpa_device *vdev, u32 data); |
961e9c84 JW |
341 | void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx, |
342 | struct vdpa_callback *cb); | |
343 | void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready); | |
344 | bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); | |
aac50c0b EC |
345 | int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, |
346 | const struct vdpa_vq_state *state); | |
23750e39 EC |
347 | int (*get_vq_state)(struct vdpa_device *vdev, u16 idx, |
348 | struct vdpa_vq_state *state); | |
13b00b13 EC |
349 | int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx, |
350 | struct sk_buff *msg, | |
351 | struct netlink_ext_ack *extack); | |
c25a26e6 JW |
352 | struct vdpa_notification_area |
353 | (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); | |
4c05433b | 354 | /* vq irq is not expected to be changed once DRIVER_OK is set */ |
d0ae1fbf | 355 | int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx); |
961e9c84 JW |
356 | |
357 | /* Device ops */ | |
425a5070 | 358 | u32 (*get_vq_align)(struct vdpa_device *vdev); |
d4821902 | 359 | u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx); |
a64917bc EC |
360 | u64 (*get_device_features)(struct vdpa_device *vdev); |
361 | int (*set_driver_features)(struct vdpa_device *vdev, u64 features); | |
362 | u64 (*get_driver_features)(struct vdpa_device *vdev); | |
961e9c84 JW |
363 | void (*set_config_cb)(struct vdpa_device *vdev, |
364 | struct vdpa_callback *cb); | |
365 | u16 (*get_vq_num_max)(struct vdpa_device *vdev); | |
3b970a58 | 366 | u16 (*get_vq_num_min)(struct vdpa_device *vdev); |
961e9c84 JW |
367 | u32 (*get_device_id)(struct vdpa_device *vdev); |
368 | u32 (*get_vendor_id)(struct vdpa_device *vdev); | |
369 | u8 (*get_status)(struct vdpa_device *vdev); | |
370 | void (*set_status)(struct vdpa_device *vdev, u8 status); | |
0686082d | 371 | int (*reset)(struct vdpa_device *vdev); |
848ecea1 | 372 | int (*suspend)(struct vdpa_device *vdev); |
1538a8a4 | 373 | int (*resume)(struct vdpa_device *vdev); |
442706f9 | 374 | size_t (*get_config_size)(struct vdpa_device *vdev); |
961e9c84 JW |
375 | void (*get_config)(struct vdpa_device *vdev, unsigned int offset, |
376 | void *buf, unsigned int len); | |
377 | void (*set_config)(struct vdpa_device *vdev, unsigned int offset, | |
378 | const void *buf, unsigned int len); | |
379 | u32 (*get_generation)(struct vdpa_device *vdev); | |
3f1b623a | 380 | struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev); |
1d246927 XY |
381 | int (*set_vq_affinity)(struct vdpa_device *vdev, u16 idx, |
382 | const struct cpumask *cpu_mask); | |
383 | const struct cpumask *(*get_vq_affinity)(struct vdpa_device *vdev, | |
384 | u16 idx); | |
961e9c84 JW |
385 | |
386 | /* DMA ops */ | |
db9adcbf GD |
387 | int (*set_map)(struct vdpa_device *vdev, unsigned int asid, |
388 | struct vhost_iotlb *iotlb); | |
389 | int (*dma_map)(struct vdpa_device *vdev, unsigned int asid, | |
390 | u64 iova, u64 size, u64 pa, u32 perm, void *opaque); | |
391 | int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid, | |
392 | u64 iova, u64 size); | |
46d554b1 GD |
393 | int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group, |
394 | unsigned int asid); | |
25da258f | 395 | struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx); |
c618c84d SG |
396 | int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm); |
397 | void (*unbind_mm)(struct vdpa_device *vdev); | |
961e9c84 JW |
398 | |
399 | /* Free device resources */ | |
400 | void (*free)(struct vdpa_device *vdev); | |
401 | }; | |
402 | ||
403 | struct vdpa_device *__vdpa_alloc_device(struct device *parent, | |
404 | const struct vdpa_config_ops *config, | |
db9adcbf | 405 | unsigned int ngroups, unsigned int nas, |
d8945ec4 XY |
406 | size_t size, const char *name, |
407 | bool use_va); | |
961e9c84 | 408 | |
c8d182bd XY |
409 | /** |
410 | * vdpa_alloc_device - allocate and initialize a vDPA device | |
411 | * | |
412 | * @dev_struct: the type of the parent structure | |
413 | * @member: the name of struct vdpa_device within the @dev_struct | |
414 | * @parent: the parent device | |
415 | * @config: the bus operations that is supported by this device | |
d4821902 | 416 | * @ngroups: the number of virtqueue groups supported by this device |
db9adcbf | 417 | * @nas: the number of address spaces |
c8d182bd | 418 | * @name: name of the vdpa device |
d8945ec4 | 419 | * @use_va: indicate whether virtual address must be used by this device |
c8d182bd XY |
420 | * |
421 | * Return allocated data structure or ERR_PTR upon error | |
422 | */ | |
db9adcbf GD |
423 | #define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \ |
424 | name, use_va) \ | |
d4821902 | 425 | container_of((__vdpa_alloc_device( \ |
db9adcbf GD |
426 | parent, config, ngroups, nas, \ |
427 | (sizeof(dev_struct) + \ | |
961e9c84 | 428 | BUILD_BUG_ON_ZERO(offsetof( \ |
db9adcbf | 429 | dev_struct, member))), name, use_va)), \ |
961e9c84 JW |
430 | dev_struct, member) |
431 | ||
81d46d69 | 432 | int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs); |
961e9c84 JW |
433 | void vdpa_unregister_device(struct vdpa_device *vdev); |
434 | ||
81d46d69 | 435 | int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs); |
903f7bca PP |
436 | void _vdpa_unregister_device(struct vdpa_device *vdev); |
437 | ||
961e9c84 | 438 | /** |
d0f9164e | 439 | * struct vdpa_driver - operations for a vDPA driver |
961e9c84 JW |
440 | * @driver: underlying device driver |
441 | * @probe: the function to call when a device is found. Returns 0 or -errno. | |
442 | * @remove: the function to call when a device is removed. | |
443 | */ | |
444 | struct vdpa_driver { | |
445 | struct device_driver driver; | |
446 | int (*probe)(struct vdpa_device *vdev); | |
447 | void (*remove)(struct vdpa_device *vdev); | |
448 | }; | |
449 | ||
450 | #define vdpa_register_driver(drv) \ | |
451 | __vdpa_register_driver(drv, THIS_MODULE) | |
452 | int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner); | |
453 | void vdpa_unregister_driver(struct vdpa_driver *drv); | |
454 | ||
455 | #define module_vdpa_driver(__vdpa_driver) \ | |
456 | module_driver(__vdpa_driver, vdpa_register_driver, \ | |
457 | vdpa_unregister_driver) | |
458 | ||
459 | static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver) | |
460 | { | |
461 | return container_of(driver, struct vdpa_driver, driver); | |
462 | } | |
463 | ||
464 | static inline struct vdpa_device *dev_to_vdpa(struct device *_dev) | |
465 | { | |
466 | return container_of(_dev, struct vdpa_device, dev); | |
467 | } | |
468 | ||
469 | static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev) | |
470 | { | |
471 | return dev_get_drvdata(&vdev->dev); | |
472 | } | |
473 | ||
474 | static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data) | |
475 | { | |
476 | dev_set_drvdata(&vdev->dev, data); | |
477 | } | |
478 | ||
479 | static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) | |
480 | { | |
481 | return vdev->dma_dev; | |
482 | } | |
452639a6 | 483 | |
0686082d | 484 | static inline int vdpa_reset(struct vdpa_device *vdev) |
452639a6 | 485 | { |
86e17a51 | 486 | const struct vdpa_config_ops *ops = vdev->config; |
aba21aff | 487 | int ret; |
452639a6 | 488 | |
a6a51adc | 489 | down_write(&vdev->cf_lock); |
452639a6 | 490 | vdev->features_valid = false; |
aba21aff | 491 | ret = ops->reset(vdev); |
a6a51adc | 492 | up_write(&vdev->cf_lock); |
aba21aff | 493 | return ret; |
452639a6 MT |
494 | } |
495 | ||
e0077cc1 | 496 | static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features) |
452639a6 | 497 | { |
86e17a51 | 498 | const struct vdpa_config_ops *ops = vdev->config; |
aba21aff EC |
499 | int ret; |
500 | ||
452639a6 | 501 | vdev->features_valid = true; |
aba21aff | 502 | ret = ops->set_driver_features(vdev, features); |
e0077cc1 SWL |
503 | |
504 | return ret; | |
505 | } | |
506 | ||
507 | static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) | |
508 | { | |
509 | int ret; | |
510 | ||
a6a51adc | 511 | down_write(&vdev->cf_lock); |
e0077cc1 | 512 | ret = vdpa_set_features_unlocked(vdev, features); |
a6a51adc | 513 | up_write(&vdev->cf_lock); |
aba21aff EC |
514 | |
515 | return ret; | |
452639a6 MT |
516 | } |
517 | ||
6dbb1f16 PP |
518 | void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, |
519 | void *buf, unsigned int len); | |
520 | void vdpa_set_config(struct vdpa_device *dev, unsigned int offset, | |
521 | const void *buf, unsigned int length); | |
73bc0dbb EC |
522 | void vdpa_set_status(struct vdpa_device *vdev, u8 status); |
523 | ||
33b34750 | 524 | /** |
d0f9164e PP |
525 | * struct vdpa_mgmtdev_ops - vdpa device ops |
526 | * @dev_add: Add a vdpa device using alloc and register | |
527 | * @mdev: parent device to use for device addition | |
528 | * @name: name of the new vdpa device | |
d8ca2fa5 | 529 | * @config: config attributes to apply to the device under creation |
d0f9164e PP |
530 | * Driver need to add a new device using _vdpa_register_device() |
531 | * after fully initializing the vdpa device. Driver must return 0 | |
532 | * on success or appropriate error code. | |
533 | * @dev_del: Remove a vdpa device using unregister | |
534 | * @mdev: parent device to use for device removal | |
535 | * @dev: vdpa device to remove | |
536 | * Driver need to remove the specified device by calling | |
537 | * _vdpa_unregister_device(). | |
33b34750 PP |
538 | */ |
539 | struct vdpa_mgmtdev_ops { | |
d8ca2fa5 PP |
540 | int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name, |
541 | const struct vdpa_dev_set_config *config); | |
33b34750 PP |
542 | void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev); |
543 | }; | |
544 | ||
960deb33 PP |
545 | /** |
546 | * struct vdpa_mgmt_dev - vdpa management device | |
547 | * @device: Management parent device | |
548 | * @ops: operations supported by management device | |
549 | * @id_table: Pointer to device id table of supported ids | |
d8ca2fa5 PP |
550 | * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that |
551 | * management device support during dev_add callback | |
960deb33 | 552 | * @list: list entry |
9be5d2d4 SH |
553 | * @supported_features: features supported by device |
554 | * @max_supported_vqs: maximum number of virtqueues supported by device | |
960deb33 | 555 | */ |
33b34750 PP |
556 | struct vdpa_mgmt_dev { |
557 | struct device *device; | |
558 | const struct vdpa_mgmtdev_ops *ops; | |
ffbda8e9 | 559 | struct virtio_device_id *id_table; |
d8ca2fa5 | 560 | u64 config_attr_mask; |
33b34750 | 561 | struct list_head list; |
cd2629f6 EC |
562 | u64 supported_features; |
563 | u32 max_supported_vqs; | |
33b34750 PP |
564 | }; |
565 | ||
566 | int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev); | |
567 | void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev); | |
568 | ||
961e9c84 | 569 | #endif /* _LINUX_VDPA_H */ |