/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

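/*
 * Note: the helpers above pack a VFIO region index into the top bits of
 * a 64-bit file offset. For example, an access to BAR2 (region index 2)
 * at offset 0x1000 uses VFIO_PCI_INDEX_TO_OFFSET(2) | 0x1000, i.e.
 * (2ULL << 40) | 0x1000; VFIO_PCI_OFFSET_TO_INDEX() and
 * VFIO_PCI_OFFSET_MASK recover the index and the in-region offset.
 */
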
#define EDID_BLOB_OFFSET (PAGE_SIZE/2)

#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct vfio_region;
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32				type;
	u32				subtype;
	size_t				size;
	u32				flags;
	const struct intel_vgpu_regops	*ops;
	void				*data;
};

struct vfio_edid_region {
	struct vfio_region_gfx_edid vfio_edid_regs;
	void *edid_blob;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
	struct dentry *debugfs_cache_entries;
};

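/*
 * One pinned guest page range, indexed twice: gfn_cache keys entries by
 * guest frame number and dma_addr_cache keys the same entries by the DMA
 * address handed to hardware, so lookups in either direction stay
 * O(log n).
 */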
struct gvt_dma {
	struct intel_vgpu *vgpu;
	struct rb_node gfn_node;
	struct rb_node dma_addr_node;
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;
};

static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size)
{
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;

	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;

		ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
		WARN_ON(ret != 1);
	}
}

/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, struct page **page)
{
	unsigned long base_pfn = 0;
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	/*
	 * We pin the pages one-by-one to avoid allocating a big array
	 * on stack to hold pfns.
	 */
	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;
		unsigned long pfn;

		ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
				     IOMMU_READ | IOMMU_WRITE, &pfn);
		if (ret != 1) {
			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
				     cur_gfn, ret);
			goto err;
		}

		if (!pfn_valid(pfn)) {
			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
			npage++;
			ret = -EFAULT;
			goto err;
		}

		if (npage == 0)
			base_pfn = pfn;
		else if (base_pfn + npage != pfn) {
			gvt_vgpu_err("The pages are not contiguous\n");
			ret = -EINVAL;
			npage++;
			goto err;
		}
	}

	*page = pfn_to_page(base_pfn);
	return 0;
err:
	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
	return ret;
}

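/*
 * Pin the guest page(s) backing @gfn and set up a streaming DMA mapping
 * for device access; on mapping failure the pages are unpinned again, so
 * the pin/map pair either fully succeeds or leaves no state behind.
 */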
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t *dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct page *page = NULL;
	int ret;

	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
	if (ret)
		return ret;

	/* Setup DMA mapping. */
	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx\n",
			     page_to_pfn(page));
		gvt_unpin_guest_page(vgpu, gfn, size);
		return -ENOMEM;
	}

	return 0;
}

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	gvt_unpin_guest_page(vgpu, gfn, size);
}

static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			node = node->rb_left;
		else if (dma_addr > itr->dma_addr)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->vgpu = vgpu;
	new->gfn = gfn;
	new->dma_addr = dma_addr;
	new->size = size;
	kref_init(&new->ref);

	/* gfn_cache maps gfn to struct gvt_dma. */
	link = &vgpu->vdev.gfn_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);

	/* dma_addr_cache maps dma addr to struct gvt_dma. */
	parent = NULL;
	link = &vgpu->vdev.dma_addr_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->dma_addr_node, parent, link);
	rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);

	vgpu->vdev.nr_cache_entries++;
	return 0;
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
		struct gvt_dma *entry)
{
	rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
	rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
	kfree(entry);
	vgpu->vdev.nr_cache_entries--;
}

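/*
 * Tear down every cached mapping. Note how cache_lock is re-acquired for
 * each entry rather than held across the whole walk: this keeps the lock
 * from being held while gvt_dma_unmap_page() unpins pages through VFIO.
 */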
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;

	for (;;) {
		mutex_lock(&vgpu->vdev.cache_lock);
		node = rb_first(&vgpu->vdev.gfn_cache);
		if (!node) {
			mutex_unlock(&vgpu->vdev.cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->vdev.cache_lock);
	}
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.gfn_cache = RB_ROOT;
	vgpu->vdev.dma_addr_cache = RB_ROOT;
	vgpu->vdev.nr_cache_entries = 0;
	mutex_init(&vgpu->vdev.cache_lock);
}

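/*
 * ptable records which guest frames have been write-protected through
 * the KVM page-track interface below; it is a simple gfn-keyed hash
 * table sized by NR_BKT above.
 */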
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->vdev.region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};

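/*
 * The EDID region is laid out as a vfio_region_gfx_edid control block at
 * the start of the region, with the EDID blob itself exposed at
 * EDID_BLOB_OFFSET (half a page in). The two helpers below handle the
 * two halves.
 */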
static int handle_edid_regs(struct intel_vgpu *vgpu,
			struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
	unsigned int data;

	if (offset + count > sizeof(*regs))
		return -EINVAL;

	if (count != 4)
		return -EINVAL;

	if (is_write) {
		data = *((unsigned int *)buf);
		switch (offset) {
		case offsetof(struct vfio_region_gfx_edid, link_state):
			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
				if (!drm_edid_block_valid(
					(u8 *)region->edid_blob,
					0,
					true,
					NULL)) {
					gvt_vgpu_err("invalid EDID blob\n");
					return -EINVAL;
				}
				intel_gvt_ops->emulate_hotplug(vgpu, true);
			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN) {
				intel_gvt_ops->emulate_hotplug(vgpu, false);
			} else {
				gvt_vgpu_err("invalid EDID link state %d\n",
					     data);
				return -EINVAL;
			}
			regs->link_state = data;
			break;
		case offsetof(struct vfio_region_gfx_edid, edid_size):
			if (data > regs->edid_max_size) {
				gvt_vgpu_err("EDID size is bigger than %d!\n",
					     regs->edid_max_size);
				return -EINVAL;
			}
			regs->edid_size = data;
			break;
		default:
			/* read-only regs */
			gvt_vgpu_err("write read-only EDID region at offset %d\n",
				     offset);
			return -EPERM;
		}
	} else {
		memcpy(buf, (char *)regs + offset, count);
	}

	return count;
}

static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	if (offset + count > region->vfio_edid_regs.edid_size)
		return -EINVAL;

	if (is_write)
		memcpy(region->edid_blob + offset, buf, count);
	else
		memcpy(buf, region->edid_blob + offset, count);

	return count;
}

static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	struct vfio_edid_region *region =
		(struct vfio_edid_region *)vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos < region->vfio_edid_regs.edid_offset) {
		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
	} else {
		pos -= EDID_BLOB_OFFSET;
		ret = handle_edid_blob(region, buf, count, pos, iswrite);
	}

	if (ret < 0)
		gvt_vgpu_err("failed to access EDID region\n");

	return ret;
}

static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
					struct vfio_region *region)
{
	kfree(region->data);
}

static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
	.rw = intel_vgpu_reg_rw_edid,
	.release = intel_vgpu_reg_release_edid,
};

static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct vfio_region *region;

	region = krealloc(vgpu->vdev.region,
			(vgpu->vdev.num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vgpu->vdev.region = region;
	vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
	vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
	vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
	vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
	vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
	vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
	vgpu->vdev.num_regions++;
	return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	vgpu->vdev.vfio_device = vfio_device_get_from_dev(
		mdev_dev(vgpu->vdev.mdev));
	if (!vgpu->vdev.vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");
		return -ENODEV;
	}
	return 0;
}

static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	void *base;
	int ret;

	/* Each vgpu has its own opregion, although VFIO would create another
	 * one later. This one is used to expose opregion to VFIO. The other,
	 * created by VFIO later, is the one the guest actually uses.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

	return ret;
}

static int kvmgt_set_edid(void *p_vgpu, int port_num)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
	struct vfio_edid_region *base;
	int ret;

	base = kzalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* TODO: Add multi-port and EDID extension block support */
	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
	base->vfio_edid_regs.edid_size = EDID_SIZE;
	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
	base->edid_blob = port->edid->edid_block;

	ret = intel_vgpu_register_reg(vgpu,
			VFIO_REGION_TYPE_GFX,
			VFIO_REGION_SUBTYPE_GFX_EDID,
			&intel_vgpu_regops_edid, EDID_SIZE,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_CAPS, base);

	return ret;
}

static void kvmgt_put_vfio_device(void *vgpu)
{
	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
		return;

	vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}

static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

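/*
 * When userspace (or the IOMMU layer) unmaps a range of guest memory,
 * drop every cached DMA mapping that falls inside it so stale pinned
 * pages are released immediately.
 */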
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		struct gvt_dma *entry;
		unsigned long iov_pfn, end_iov_pfn;

		iov_pfn = unmap->iova >> PAGE_SHIFT;
		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

		mutex_lock(&vgpu->vdev.cache_lock);
		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
			if (!entry)
				continue;

			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
					   entry->size);
			__gvt_cache_remove_entry(vgpu, entry);
		}
		mutex_unlock(&vgpu->vdev.cache_lock);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	/* Take a module reference, as the mdev core doesn't take one for
	 * the vendor driver.
	 */
	if (!try_module_get(THIS_MODULE)) {
		ret = -ENODEV;
		goto undo_group;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vgpu->vdev.released, 0);
	return ret;

undo_group:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
out:
	return ret;
}

static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
	struct eventfd_ctx *trigger;

	trigger = vgpu->vdev.msi_trigger;
	if (trigger) {
		eventfd_ctx_put(trigger);
		vgpu->vdev.msi_trigger = NULL;
	}
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	intel_gvt_ops->vgpu_release(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	/* drop the module reference taken at open */
	module_put(THIS_MODULE);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	intel_vgpu_release_msi_eventfd_ctx(vgpu);

	vgpu->vdev.kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					       vdev.release_work);

	__intel_vgpu_release(vgpu);
}

static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
			     void *buf, unsigned int count, bool is_write)
{
	u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
		void *buf, unsigned long count, bool is_write)
{
	void *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO;

	if (is_write)
		memcpy(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}

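/*
 * Central read/write dispatcher: the region index decoded from *ppos
 * selects config-space emulation, BAR0 MMIO emulation, direct aperture
 * access for BAR2, or one of the extra device-specific regions
 * (OpRegion, EDID) registered above.
 */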
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
				ppos, is_write);
	}

	return ret == 0 ? count : ret;
}

static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct intel_gvt *gvt = vgpu->gvt;
	int offset;

	/* Only allow MMIO GGTT entry access */
	if (index != PCI_BASE_ADDRESS_0)
		return false;

	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return (offset >= gvt->device_info.gtt_start_offset &&
		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
			true : false;
}

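/*
 * Userspace accesses of arbitrary size are split into the largest
 * naturally aligned chunks the emulation can handle: 8 bytes only for
 * GGTT entries, otherwise 4, 2 or 1 bytes at a time.
 */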
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* 8-byte reads are only supported for GGTT entries */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(mdev, ppos)) {
			u64 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* 8-byte writes are only supported for GGTT entries */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(mdev, ppos)) {
			u64 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

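/*
 * Only BAR2 (the aperture) can be mmapped; everything else must go
 * through the read/write emulation paths above.
 */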
static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff = 0;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

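/*
 * The INTx mask/unmask/trigger handlers below are accepted but do
 * nothing; only the MSI trigger further down is actually wired up to an
 * eventfd.
 */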
static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
		intel_vgpu_release_msi_eventfd_ctx(vgpu);

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, u32 flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

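/*
 * mdev ioctl entry point: implements the standard VFIO device ioctls
 * (GET_INFO, GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS, RESET) plus the
 * GFX plane/dmabuf queries used for guest display.
 */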
static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS +
				vgpu->vdev.num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		unsigned int i;
		int ret;
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
		size_t size;
		int nr_areas = 1;
		int cap_type_id;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->gvt->device_info.cfg_space_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
					(nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);
			if (!sparse)
				return -ENOMEM;

			sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->header.version = 1;
			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default:
			{
				struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

				if (info.index >= VFIO_PCI_NUM_REGIONS +
						vgpu->vdev.num_regions)
					return -EINVAL;
				info.index =
					array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vgpu->vdev.num_regions);

				i = info.index - VFIO_PCI_NUM_REGIONS;

				info.offset =
					VFIO_PCI_INDEX_TO_OFFSET(info.index);
				info.size = vgpu->vdev.region[i].size;
				info.flags = vgpu->vdev.region[i].flags;

				cap_type.type = vgpu->vdev.region[i].type;
				cap_type.subtype = vgpu->vdev.region[i].subtype;

				ret = vfio_info_add_capability(&caps,
							&cap_type.header,
							sizeof(cap_type));
				if (ret)
					return ret;
			}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					&sparse->header, sizeof(*sparse) +
					(sparse->nr_areas *
						sizeof(*sparse->areas)));
				if (ret) {
					kfree(sparse);
					return ret;
				}
				break;
			default:
				kfree(sparse);
				return -EINVAL;
			}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					kfree(sparse);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		kfree(sparse);
		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}
			if (data_size) {
				data = memdup_user((void __user *)(arg + minsz),
						   data_size);
				if (IS_ERR(data))
					return PTR_ERR(data);
			}
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
		struct vfio_device_gfx_plane_info dmabuf;
		int ret = 0;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    dmabuf_id);
		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
			return -EFAULT;
		if (dmabuf.argsz < minsz)
			return -EINVAL;

		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
		if (ret != 0)
			return ret;

		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
								-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
		__u32 dmabuf_id;
		__s32 dmabuf_fd;

		if (get_user(dmabuf_id, (__u32 __user *)arg))
			return -EFAULT;

		dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
		return dmabuf_fd;

	}

	return -ENOTTY;
}

static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%d\n", vgpu->id);
	}
	return sprintf(buf, "\n");
}

static ssize_t
hw_id_show(struct device *dev, struct device_attribute *attr,
	   char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%u\n",
			       vgpu->submission.shadow_ctx->hw_id);
	}
	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(vgpu_id);
static DEVICE_ATTR_RO(hw_id);

static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	&dev_attr_hw_id.attr,
	NULL
};

static const struct attribute_group intel_vgpu_group = {
	.name = "intel_vgpu",
	.attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
	&intel_vgpu_group,
	NULL,
};

static struct mdev_parent_ops intel_vgpu_ops = {
	.mdev_attr_groups	= intel_vgpu_groups,
	.create			= intel_vgpu_create,
	.remove			= intel_vgpu_remove,

	.open			= intel_vgpu_open,
	.release		= intel_vgpu_release,

	.read			= intel_vgpu_read,
	.write			= intel_vgpu_write,
	.mmap			= intel_vgpu_mmap,
	.ioctl			= intel_vgpu_ioctl,
};

f30437c5 JS |
1607 | static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) |
1608 | { | |
6aa23ced | 1609 | struct attribute **kvm_type_attrs; |
1610 | struct attribute_group **kvm_vgpu_type_groups; | |
f30437c5 JS |
1611 | |
1612 | intel_gvt_ops = ops; | |
6aa23ced | 1613 | if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs, |
1614 | &kvm_vgpu_type_groups)) | |
1615 | return -EFAULT; | |
1616 | intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups; | |
f30437c5 | 1617 | |
659643f7 | 1618 | return mdev_register_device(dev, &intel_vgpu_ops); |
f30437c5 JS |
1619 | } |
1620 | ||
a2b8419a | 1621 | static void kvmgt_host_exit(struct device *dev) |
f30437c5 | 1622 | { |
659643f7 | 1623 | mdev_unregister_device(dev); |
f30437c5 JS |
1624 | } |
1625 | ||
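/*
 * Start write-protecting @gfn: guest writes to the page are then
 * emulated by KVM and delivered to kvmgt_page_track_write() below. The
 * srcu read lock protects the memslot lookup, mmu_lock serializes the
 * page-track state, and the per-guest protect table makes the call
 * idempotent.
 */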
f66e5ff7 | 1626 | static int kvmgt_page_track_add(unsigned long handle, u64 gfn) |
f30437c5 | 1627 | { |
659643f7 JS |
1628 | struct kvmgt_guest_info *info; |
1629 | struct kvm *kvm; | |
f30437c5 JS |
1630 | struct kvm_memory_slot *slot; |
1631 | int idx; | |
1632 | ||
659643f7 JS |
1633 | if (!handle_valid(handle)) |
1634 | return -ESRCH; | |
1635 | ||
1636 | info = (struct kvmgt_guest_info *)handle; | |
1637 | kvm = info->kvm; | |
1638 | ||
f30437c5 JS |
1639 | idx = srcu_read_lock(&kvm->srcu); |
1640 | slot = gfn_to_memslot(kvm, gfn); | |
faaaa53b JS |
1641 | if (!slot) { |
1642 | srcu_read_unlock(&kvm->srcu, idx); | |
1643 | return -EINVAL; | |
1644 | } | |
f30437c5 JS |
1645 | |
1646 | spin_lock(&kvm->mmu_lock); | |
1647 | ||
1648 | if (kvmgt_gfn_is_write_protected(info, gfn)) | |
1649 | goto out; | |
1650 | ||
1651 | kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); | |
1652 | kvmgt_protect_table_add(info, gfn); | |
1653 | ||
1654 | out: | |
1655 | spin_unlock(&kvm->mmu_lock); | |
1656 | srcu_read_unlock(&kvm->srcu, idx); | |
1657 | return 0; | |
1658 | } | |
1659 | ||
f66e5ff7 | 1660 | static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) |
f30437c5 | 1661 | { |
659643f7 JS |
1662 | struct kvmgt_guest_info *info; |
1663 | struct kvm *kvm; | |
f30437c5 JS |
1664 | struct kvm_memory_slot *slot; |
1665 | int idx; | |
1666 | ||
659643f7 JS |
1667 | if (!handle_valid(handle)) |
1668 | return 0; | |
1669 | ||
1670 | info = (struct kvmgt_guest_info *)handle; | |
1671 | kvm = info->kvm; | |
1672 | ||
f30437c5 JS |
1673 | idx = srcu_read_lock(&kvm->srcu); |
1674 | slot = gfn_to_memslot(kvm, gfn); | |
faaaa53b JS |
1675 | if (!slot) { |
1676 | srcu_read_unlock(&kvm->srcu, idx); | |
1677 | return -EINVAL; | |
1678 | } | |
f30437c5 JS |
1679 | |
1680 | spin_lock(&kvm->mmu_lock); | |
1681 | ||
1682 | if (!kvmgt_gfn_is_write_protected(info, gfn)) | |
1683 | goto out; | |
1684 | ||
1685 | kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); | |
1686 | kvmgt_protect_table_del(info, gfn); | |
1687 | ||
1688 | out: | |
1689 | spin_unlock(&kvm->mmu_lock); | |
1690 | srcu_read_unlock(&kvm->srcu, idx); | |
1691 | return 0; | |
1692 | } | |
1693 | ||
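/*
 * Page-track notifier: invoked by KVM for emulated guest writes. Only
 * writes that hit a gfn this guest protected are forwarded to GVT's
 * write-protect handler.
 */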
1694 | static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |
1695 | const u8 *val, int len, | |
1696 | struct kvm_page_track_notifier_node *node) | |
1697 | { | |
1698 | struct kvmgt_guest_info *info = container_of(node, | |
1699 | struct kvmgt_guest_info, track_node); | |
1700 | ||
1701 | if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) | |
4fafba2d ZW |
1702 | intel_gvt_ops->write_protect_handler(info->vgpu, gpa, |
1703 | (void *)val, len); | |
f30437c5 JS |
1704 | } |
1705 | ||
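/*
 * Memslot flush notifier: when KVM removes or moves a memory slot, drop
 * the write protection and the protect-table entry for every tracked
 * gfn inside that slot.
 */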
1706 | static void kvmgt_page_track_flush_slot(struct kvm *kvm, | |
1707 | struct kvm_memory_slot *slot, | |
1708 | struct kvm_page_track_notifier_node *node) | |
1709 | { | |
1710 | int i; | |
1711 | gfn_t gfn; | |
1712 | struct kvmgt_guest_info *info = container_of(node, | |
1713 | struct kvmgt_guest_info, track_node); | |
1714 | ||
1715 | spin_lock(&kvm->mmu_lock); | |
1716 | for (i = 0; i < slot->npages; i++) { | |
1717 | gfn = slot->base_gfn + i; | |
1718 | if (kvmgt_gfn_is_write_protected(info, gfn)) { | |
1719 | kvm_slot_page_track_remove_page(kvm, slot, gfn, | |
1720 | KVM_PAGE_TRACK_WRITE); | |
1721 | kvmgt_protect_table_del(info, gfn); | |
1722 | } | |
1723 | } | |
1724 | spin_unlock(&kvm->mmu_lock); | |
1725 | } | |
1726 | ||
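/*
 * Return true if @kvm is already bound to another active vGPU;
 * kvmgt_guest_init() uses this to enforce at most one vGPU per KVM
 * instance.
 */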
659643f7 JS |
1727 | static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) |
1728 | { | |
1729 | struct intel_vgpu *itr; | |
1730 | struct kvmgt_guest_info *info; | |
1731 | int id; | |
1732 | bool ret = false; | |
1733 | ||
1734 | mutex_lock(&vgpu->gvt->lock); | |
1735 | for_each_active_vgpu(vgpu->gvt, itr, id) { | |
1736 | if (!handle_valid(itr->handle)) | |
1737 | continue; | |
1738 | ||
1739 | info = (struct kvmgt_guest_info *)itr->handle; | |
1740 | if (kvm && kvm == info->kvm) { | |
1741 | ret = true; | |
1742 | goto out; | |
1743 | } | |
1744 | } | |
1745 | out: | |
1746 | mutex_unlock(&vgpu->gvt->lock); | |
1747 | return ret; | |
1748 | } | |
1749 | ||
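/*
 * Bind an opened mdev to its guest VM: validate that the caller's KVM
 * instance is attached, enforce one vGPU per VM, allocate the guest
 * info that backs vgpu->handle, take a reference on the kvm, and
 * register the page-track notifier used for guest page shadowing.
 */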
1750 | static int kvmgt_guest_init(struct mdev_device *mdev) | |
1751 | { | |
1752 | struct kvmgt_guest_info *info; | |
1753 | struct intel_vgpu *vgpu; | |
1754 | struct kvm *kvm; | |
1755 | ||
1756 | vgpu = mdev_get_drvdata(mdev); | |
1757 | if (handle_valid(vgpu->handle)) | |
1758 | return -EEXIST; | |
1759 | ||
1760 | kvm = vgpu->vdev.kvm; | |
1761 | if (!kvm || kvm->mm != current->mm) { | |
695fbc08 | 1762 | gvt_vgpu_err("KVM is required to use Intel vGPU\n"); |
659643f7 JS |
1763 | return -ESRCH; |
1764 | } | |
1765 | ||
1766 | if (__kvmgt_vgpu_exist(vgpu, kvm)) | |
1767 | return -EEXIST; | |
1768 | ||
1769 | info = vzalloc(sizeof(struct kvmgt_guest_info)); | |
1770 | if (!info) | |
1771 | return -ENOMEM; | |
1772 | ||
1773 | vgpu->handle = (unsigned long)info; | |
1774 | info->vgpu = vgpu; | |
1775 | info->kvm = kvm; | |
93a15b58 | 1776 | kvm_get_kvm(info->kvm); |
659643f7 JS |
1777 | |
1778 | kvmgt_protect_table_init(info); | |
1779 | gvt_cache_init(vgpu); | |
1780 | ||
e546e281 TZ |
1781 | init_completion(&vgpu->vblank_done); |
1782 | ||
659643f7 JS |
1783 | info->track_node.track_write = kvmgt_page_track_write; |
1784 | info->track_node.track_flush_slot = kvmgt_page_track_flush_slot; | |
1785 | kvm_page_track_register_notifier(kvm, &info->track_node); | |
1786 | ||
6846dfeb CD |
1787 | info->debugfs_cache_entries = debugfs_create_ulong( |
1788 | "kvmgt_nr_cache_entries", | |
1789 | 0444, vgpu->debugfs, | |
1790 | &vgpu->vdev.nr_cache_entries); | |
1791 | if (!info->debugfs_cache_entries) | |
1792 | gvt_vgpu_err("Cannot create kvmgt debugfs entry\n"); | |
1793 | ||
659643f7 JS |
1794 | return 0; |
1795 | } | |
1796 | ||
1797 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) | |
1798 | { | |
6846dfeb CD |
1799 | debugfs_remove(info->debugfs_cache_entries); |
1800 | ||
659643f7 | 1801 | kvm_page_track_unregister_notifier(info->kvm, &info->track_node); |
93a15b58 | 1802 | kvm_put_kvm(info->kvm); |
659643f7 | 1803 | kvmgt_protect_table_destroy(info); |
8ff842fd | 1804 | gvt_cache_destroy(info->vgpu); |
659643f7 JS |
1805 | vfree(info); |
1806 | ||
1807 | return true; | |
1808 | } | |
1809 | ||
f30437c5 JS |
1810 | static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) |
1811 | { | |
1812 | /* nothing to do here */ | |
1813 | return 0; | |
1814 | } | |
1815 | ||
1816 | static void kvmgt_detach_vgpu(unsigned long handle) | |
1817 | { | |
1818 | /* nothing to do here */ | |
1819 | } | |
1820 | ||
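/*
 * Inject an MSI into the guest by signalling the eventfd that user
 * space registered as this vGPU's MSI trigger (set up through VFIO's
 * irq configuration path).
 */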
1821 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) | |
1822 | { | |
659643f7 JS |
1823 | struct kvmgt_guest_info *info; |
1824 | struct intel_vgpu *vgpu; | |
f30437c5 | 1825 | |
659643f7 JS |
1826 | if (!handle_valid(handle)) |
1827 | return -ESRCH; | |
f30437c5 | 1828 | |
659643f7 JS |
1829 | info = (struct kvmgt_guest_info *)handle; |
1830 | vgpu = info->vgpu; | |
1831 | ||
d54e7934 XZ |
1832 | /*
1833 | * When the guest powers off, msi_trigger is set to NULL, but the
1834 | * vGPU's config space and MMIO registers are not restored to their
1835 | * defaults. If this vGPU is then reused by another VM, a pipe may
1836 | * still be enabled, so a vblank interrupt will be requested as soon
1837 | * as the vGPU becomes active. msi_trigger stays NULL until the
1838 | * guest enables MSI, so when it is NULL, return success without
1839 | * injecting an interrupt into the guest.
1840 | */
1841 | if (vgpu->vdev.msi_trigger == NULL) | |
1842 | return 0; | |
1843 | ||
659643f7 JS |
1844 | if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1) |
1845 | return 0; | |
1846 | ||
1847 | return -EFAULT; | |
f30437c5 JS |
1848 | } |
1849 | ||
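/*
 * Translate a guest frame number to a host pfn through KVM;
 * INTEL_GVT_INVALID_ADDR is returned when the translation fails.
 */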
1850 | static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | |
1851 | { | |
659643f7 | 1852 | struct kvmgt_guest_info *info; |
cf4ee73f | 1853 | kvm_pfn_t pfn; |
f30437c5 | 1854 | |
659643f7 JS |
1855 | if (!handle_valid(handle)) |
1856 | return INTEL_GVT_INVALID_ADDR; | |
1857 | ||
1858 | info = (struct kvmgt_guest_info *)handle; | |
cf4ee73f CD |
1859 | |
1860 | pfn = gfn_to_pfn(info->kvm, gfn); | |
1861 | if (is_error_noslot_pfn(pfn)) | |
4a0b3444 | 1862 | return INTEL_GVT_INVALID_ADDR; |
cf4ee73f CD |
1863 | |
1864 | return pfn; | |
1865 | } | |
1866 | ||
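/*
 * Map guest memory at @gfn for device DMA. Mappings are cached per
 * vGPU and reference counted: the first request pins and maps the
 * pages, later requests for the same gfn only take a kref. The unmap
 * path below drops the kref and tears the mapping down once it reaches
 * zero.
 */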
63ef2623 | 1867 | static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, |
79e542f5 | 1868 | unsigned long size, dma_addr_t *dma_addr) |
cf4ee73f CD |
1869 | { |
1870 | struct kvmgt_guest_info *info; | |
1871 | struct intel_vgpu *vgpu; | |
1872 | struct gvt_dma *entry; | |
1873 | int ret; | |
1874 | ||
1875 | if (!handle_valid(handle)) | |
1876 | return -EINVAL; | |
1877 | ||
1878 | info = (struct kvmgt_guest_info *)handle; | |
1879 | vgpu = info->vgpu; | |
1880 | ||
1881 | mutex_lock(&info->vgpu->vdev.cache_lock); | |
1882 | ||
1883 | entry = __gvt_cache_find_gfn(info->vgpu, gfn); | |
1884 | if (!entry) { | |
79e542f5 | 1885 | ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); |
5cd4223e CD |
1886 | if (ret) |
1887 | goto err_unlock; | |
1888 | ||
79e542f5 | 1889 | ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); |
5cd4223e CD |
1890 | if (ret) |
1891 | goto err_unmap; | |
cf4ee73f CD |
1892 | } else { |
1893 | kref_get(&entry->ref); | |
1894 | *dma_addr = entry->dma_addr; | |
4a0b3444 | 1895 | } |
f30437c5 | 1896 | |
cf4ee73f CD |
1897 | mutex_unlock(&info->vgpu->vdev.cache_lock); |
1898 | return 0; | |
5cd4223e CD |
1899 | |
1900 | err_unmap: | |
79e542f5 | 1901 | gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); |
5cd4223e CD |
1902 | err_unlock: |
1903 | mutex_unlock(&info->vgpu->vdev.cache_lock); | |
1904 | return ret; | |
cf4ee73f CD |
1905 | } |
1906 | ||
1907 | static void __gvt_dma_release(struct kref *ref) | |
1908 | { | |
1909 | struct gvt_dma *entry = container_of(ref, typeof(*entry), ref); | |
1910 | ||
79e542f5 CD |
1911 | gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr, |
1912 | entry->size); | |
cf4ee73f CD |
1913 | __gvt_cache_remove_entry(entry->vgpu, entry); |
1914 | } | |
1915 | ||
63ef2623 | 1916 | static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) |
cf4ee73f CD |
1917 | { |
1918 | struct kvmgt_guest_info *info; | |
1919 | struct gvt_dma *entry; | |
1920 | ||
1921 | if (!handle_valid(handle)) | |
1922 | return; | |
1923 | ||
1924 | info = (struct kvmgt_guest_info *)handle; | |
1925 | ||
1926 | mutex_lock(&info->vgpu->vdev.cache_lock); | |
1927 | entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); | |
1928 | if (entry) | |
1929 | kref_put(&entry->ref, __gvt_dma_release); | |
1930 | mutex_unlock(&info->vgpu->vdev.cache_lock); | |
f30437c5 JS |
1931 | } |
1932 | ||
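/*
 * Copy @len bytes between @buf and guest physical address @gpa; GVT
 * reaches this through the .read_gpa/.write_gpa hooks of kvmgt_mpt
 * below. When called from a kernel thread (current->mm == NULL) the
 * VM's mm is temporarily adopted with use_mm(), and mmget_not_zero()
 * guards against the VM exiting underneath us. The srcu read lock
 * protects the memslot lookups in kvm_read_guest()/kvm_write_guest().
 */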
f30437c5 JS |
1933 | static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, |
1934 | void *buf, unsigned long len, bool write) | |
1935 | { | |
f440c8a5 JS |
1936 | struct kvmgt_guest_info *info; |
1937 | struct kvm *kvm; | |
5180edc2 | 1938 | int idx, ret; |
f440c8a5 | 1939 | bool kthread = current->mm == NULL; |
f30437c5 | 1940 | |
659643f7 JS |
1941 | if (!handle_valid(handle)) |
1942 | return -ESRCH; | |
1943 | ||
f440c8a5 JS |
1944 | info = (struct kvmgt_guest_info *)handle; |
1945 | kvm = info->kvm; | |
f30437c5 | 1946 | |
0a1b60d7 ZW |
1947 | if (kthread) { |
1948 | if (!mmget_not_zero(kvm->mm)) | |
1949 | return -EFAULT; | |
f440c8a5 | 1950 | use_mm(kvm->mm); |
0a1b60d7 | 1951 | } |
f30437c5 | 1952 | |
5180edc2 | 1953 | idx = srcu_read_lock(&kvm->srcu); |
f440c8a5 JS |
1954 | ret = write ? kvm_write_guest(kvm, gpa, buf, len) : |
1955 | kvm_read_guest(kvm, gpa, buf, len); | |
5180edc2 | 1956 | srcu_read_unlock(&kvm->srcu, idx); |
f440c8a5 | 1957 | |
0a1b60d7 | 1958 | if (kthread) { |
f440c8a5 | 1959 | unuse_mm(kvm->mm); |
0a1b60d7 ZW |
1960 | mmput(kvm->mm); |
1961 | } | |
f440c8a5 JS |
1962 | |
1963 | return ret; | |
f30437c5 JS |
1964 | } |
1965 | ||
1966 | static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, | |
1967 | void *buf, unsigned long len) | |
1968 | { | |
1969 | return kvmgt_rw_gpa(handle, gpa, buf, len, false); | |
1970 | } | |
1971 | ||
1972 | static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, | |
1973 | void *buf, unsigned long len) | |
1974 | { | |
1975 | return kvmgt_rw_gpa(handle, gpa, buf, len, true); | |
1976 | } | |
1977 | ||
1978 | static unsigned long kvmgt_virt_to_pfn(void *addr) | |
1979 | { | |
1980 | return PFN_DOWN(__pa(addr)); | |
1981 | } | |
1982 | ||
cc753fbe HY |
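/*
 * Return true if @gfn is backed by a memslot visible to the guest; like
 * every other memslot access, the lookup runs under the srcu read lock.
 */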
1983 | static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) |
1984 | { | |
1985 | struct kvmgt_guest_info *info; | |
1986 | struct kvm *kvm; | |
a1ac5f09 WL |
1987 | int idx; |
1988 | bool ret; | |
cc753fbe HY |
1989 | |
1990 | if (!handle_valid(handle)) | |
1991 | return false; | |
1992 | ||
1993 | info = (struct kvmgt_guest_info *)handle; | |
1994 | kvm = info->kvm; | |
1995 | ||
a1ac5f09 WL |
1996 | idx = srcu_read_lock(&kvm->srcu); |
1997 | ret = kvm_is_visible_gfn(kvm, gfn); | |
1998 | srcu_read_unlock(&kvm->srcu, idx); | |
cc753fbe | 1999 | |
a1ac5f09 | 2000 | return ret; |
cc753fbe HY |
2001 | } |
2002 | ||
9bdb0734 ZW |
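/*
 * The mediated pass-through (MPT) operations registered with the GVT
 * core: they map GVT's hypervisor-agnostic hooks onto the KVM-specific
 * implementations above.
 */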
2003 | static struct intel_gvt_mpt kvmgt_mpt = { |
2004 | .type = INTEL_GVT_HYPERVISOR_KVM, | |
f30437c5 JS |
2005 | .host_init = kvmgt_host_init, |
2006 | .host_exit = kvmgt_host_exit, | |
2007 | .attach_vgpu = kvmgt_attach_vgpu, | |
2008 | .detach_vgpu = kvmgt_detach_vgpu, | |
2009 | .inject_msi = kvmgt_inject_msi, | |
2010 | .from_virt_to_mfn = kvmgt_virt_to_pfn, | |
f66e5ff7 CD |
2011 | .enable_page_track = kvmgt_page_track_add, |
2012 | .disable_page_track = kvmgt_page_track_remove, | |
f30437c5 JS |
2013 | .read_gpa = kvmgt_read_gpa, |
2014 | .write_gpa = kvmgt_write_gpa, | |
2015 | .gfn_to_mfn = kvmgt_gfn_to_pfn, | |
cf4ee73f CD |
2016 | .dma_map_guest_page = kvmgt_dma_map_guest_page, |
2017 | .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, | |
b851adea | 2018 | .set_opregion = kvmgt_set_opregion, |
39c68e87 | 2019 | .set_edid = kvmgt_set_edid, |
e546e281 TZ |
2020 | .get_vfio_device = kvmgt_get_vfio_device, |
2021 | .put_vfio_device = kvmgt_put_vfio_device, | |
cc753fbe | 2022 | .is_valid_gfn = kvmgt_is_valid_gfn, |
f30437c5 | 2023 | }; |
f30437c5 JS |
2024 | |
2025 | static int __init kvmgt_init(void) | |
2026 | { | |
9bdb0734 ZW |
2027 | if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0) |
2028 | return -ENODEV; | |
f30437c5 JS |
2029 | return 0; |
2030 | } | |
2031 | ||
2032 | static void __exit kvmgt_exit(void) | |
2033 | { | |
9bdb0734 | 2034 | intel_gvt_unregister_hypervisor(); |
f30437c5 JS |
2035 | } |
2036 | ||
2037 | module_init(kvmgt_init); | |
2038 | module_exit(kvmgt_exit); | |
2039 | ||
2040 | MODULE_LICENSE("GPL and additional rights"); | |
2041 | MODULE_AUTHOR("Intel Corporation"); |