/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

#define EDID_BLOB_OFFSET (PAGE_SIZE/2)

#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct vfio_region;
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32				type;
	u32				subtype;
	size_t				size;
	u32				flags;
	const struct intel_vgpu_regops	*ops;
	void				*data;
};

struct vfio_edid_region {
	struct vfio_region_gfx_edid vfio_edid_regs;
	void *edid_blob;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
	struct dentry *debugfs_cache_entries;
};

struct gvt_dma {
	struct intel_vgpu *vgpu;
	struct rb_node gfn_node;
	struct rb_node dma_addr_node;
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;
};

struct kvmgt_vdev {
	struct intel_vgpu *vgpu;
	struct mdev_device *mdev;
	struct vfio_region *region;
	int num_regions;
	struct eventfd_ctx *intx_trigger;
	struct eventfd_ctx *msi_trigger;

	/*
	 * Two caches are used to avoid mapping duplicated pages (e.g.
	 * scratch pages). This helps to reduce dma setup overhead.
	 */
	struct rb_root gfn_cache;
	struct rb_root dma_addr_cache;
	unsigned long nr_cache_entries;
	struct mutex cache_lock;

	struct notifier_block iommu_notifier;
	struct notifier_block group_notifier;
	struct kvm *kvm;
	struct work_struct release_work;
	atomic_t released;
	struct vfio_device *vfio_device;
	struct vfio_group *vfio_group;
};

static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
{
	return intel_vgpu_vdev(vgpu);
}

static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

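/* Unpin a range of guest pages that was previously pinned via the vfio group. */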
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;

	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;

		ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
		drm_WARN_ON(&i915->drm, ret != 1);
	}
}

/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, struct page **page)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned long base_pfn = 0;
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	/*
	 * We pin the pages one-by-one to avoid allocating a big array
	 * on stack to hold pfns.
	 */
	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;
		unsigned long pfn;

		ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
					   IOMMU_READ | IOMMU_WRITE, &pfn);
		if (ret != 1) {
			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
				     cur_gfn, ret);
			goto err;
		}

		if (!pfn_valid(pfn)) {
			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
			npage++;
			ret = -EFAULT;
			goto err;
		}

		if (npage == 0)
			base_pfn = pfn;
		else if (base_pfn + npage != pfn) {
			gvt_vgpu_err("The pages are not contiguous\n");
			ret = -EINVAL;
			npage++;
			goto err;
		}
	}

	*page = pfn_to_page(base_pfn);
	return 0;
err:
	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
	return ret;
}

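/*
 * Pin the guest page range backing @gfn and map it for DMA; on mapping
 * failure the pages are unpinned again before returning.
 */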
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t *dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	struct page *page = NULL;
	int ret;

	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
	if (ret)
		return ret;

	/* Setup DMA mapping. */
	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
			     page_to_pfn(page), ret);
		gvt_unpin_guest_page(vgpu, gfn, size);
		return -ENOMEM;
	}

	return 0;
}

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;

	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	gvt_unpin_guest_page(vgpu, gfn, size);
}

static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			node = node->rb_left;
		else if (dma_addr > itr->dma_addr)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

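/* Track a pinned mapping in both lookup trees (keyed by gfn and by dma addr). */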
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link, *parent = NULL;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->vgpu = vgpu;
	new->gfn = gfn;
	new->dma_addr = dma_addr;
	new->size = size;
	kref_init(&new->ref);

	/* gfn_cache maps gfn to struct gvt_dma. */
	link = &vdev->gfn_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vdev->gfn_cache);

	/* dma_addr_cache maps dma addr to struct gvt_dma. */
	parent = NULL;
	link = &vdev->dma_addr_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->dma_addr_node, parent, link);
	rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);

	vdev->nr_cache_entries++;
	return 0;
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	rb_erase(&entry->gfn_node, &vdev->gfn_cache);
	rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
	kfree(entry);
	vdev->nr_cache_entries--;
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	for (;;) {
		mutex_lock(&vdev->cache_lock);
		node = rb_first(&vdev->gfn_cache);
		if (!node) {
			mutex_unlock(&vdev->cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vdev->cache_lock);
	}
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	vdev->gfn_cache = RB_ROOT;
	vdev->dma_addr_cache = RB_ROOT;
	vdev->nr_cache_entries = 0;
	mutex_init(&vdev->cache_lock);
}

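/*
 * The protect table records which guest page frames are currently
 * write-protected for this guest, hashed by gfn.
 */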
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

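/* Read handler for the read-only OpRegion region exposed through VFIO. */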
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vdev->region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vdev->region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};

static int handle_edid_regs(struct intel_vgpu *vgpu,
			struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
	unsigned int data;

	if (offset + count > sizeof(*regs))
		return -EINVAL;

	if (count != 4)
		return -EINVAL;

	if (is_write) {
		data = *((unsigned int *)buf);
		switch (offset) {
		case offsetof(struct vfio_region_gfx_edid, link_state):
			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
				if (!drm_edid_block_valid(
					(u8 *)region->edid_blob,
					0,
					true,
					NULL)) {
					gvt_vgpu_err("invalid EDID blob\n");
					return -EINVAL;
				}
				intel_gvt_ops->emulate_hotplug(vgpu, true);
			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
				intel_gvt_ops->emulate_hotplug(vgpu, false);
			else {
				gvt_vgpu_err("invalid EDID link state %d\n",
					data);
				return -EINVAL;
			}
			regs->link_state = data;
			break;
		case offsetof(struct vfio_region_gfx_edid, edid_size):
			if (data > regs->edid_max_size) {
				gvt_vgpu_err("EDID size is bigger than %d!\n",
					regs->edid_max_size);
				return -EINVAL;
			}
			regs->edid_size = data;
			break;
		default:
			/* read-only regs */
			gvt_vgpu_err("write read-only EDID region at offset %d\n",
				offset);
			return -EPERM;
		}
	} else {
		memcpy(buf, (char *)regs + offset, count);
	}

	return count;
}

static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	if (offset + count > region->vfio_edid_regs.edid_size)
		return -EINVAL;

	if (is_write)
		memcpy(region->edid_blob + offset, buf, count);
	else
		memcpy(buf, region->edid_blob + offset, count);

	return count;
}

static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	struct vfio_edid_region *region =
		(struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos < region->vfio_edid_regs.edid_offset) {
		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
	} else {
		pos -= EDID_BLOB_OFFSET;
		ret = handle_edid_blob(region, buf, count, pos, iswrite);
	}

	if (ret < 0)
		gvt_vgpu_err("failed to access EDID region\n");

	return ret;
}

static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
	kfree(region->data);
}

static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
	.rw = intel_vgpu_reg_rw_edid,
	.release = intel_vgpu_reg_release_edid,
};

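/* Append a device-specific VFIO region to the vGPU's region array. */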
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct vfio_region *region;

	region = krealloc(vdev->region,
			(vdev->num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;
	vdev->num_regions++;
	return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	vdev->vfio_device = vfio_device_get_from_dev(
		mdev_dev(vdev->mdev));
	if (!vdev->vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");
		return -ENODEV;
	}
	return 0;
}

static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	void *base;
	int ret;

	/* Each vgpu has its own opregion, although VFIO would create another
	 * one later. This one is used to expose the opregion to VFIO, while
	 * the other one, created by VFIO later, is what the guest actually
	 * uses.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

	return ret;
}

static int kvmgt_set_edid(void *p_vgpu, int port_num)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
	struct vfio_edid_region *base;
	int ret;

	base = kzalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* TODO: Add multi-port and EDID extension block support */
	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
	base->vfio_edid_regs.edid_size = EDID_SIZE;
	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
	base->edid_blob = port->edid->edid_block;

	ret = intel_vgpu_register_reg(vgpu,
			VFIO_REGION_TYPE_GFX,
			VFIO_REGION_SUBTYPE_GFX_EDID,
			&intel_vgpu_regops_edid, EDID_SIZE,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_CAPS, base);

	return ret;
}

static void kvmgt_put_vfio_device(void *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);

	if (WARN_ON(!vdev->vfio_device))
		return;

	vfio_device_put(vdev->vfio_device);
}

static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);

	kvmgt_vdev(vgpu)->mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

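/*
 * When userspace unmaps a guest IOVA range, drop any cached pinnings that
 * fall inside it so the backing pages can actually be released.
 */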
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct kvmgt_vdev *vdev = container_of(nb,
					       struct kvmgt_vdev,
					       iommu_notifier);
	struct intel_vgpu *vgpu = vdev->vgpu;

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		struct gvt_dma *entry;
		unsigned long iov_pfn, end_iov_pfn;

		iov_pfn = unmap->iova >> PAGE_SHIFT;
		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

		mutex_lock(&vdev->cache_lock);
		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
			if (!entry)
				continue;

			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
					   entry->size);
			__gvt_cache_remove_entry(vgpu, entry);
		}
		mutex_unlock(&vdev->cache_lock);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct kvmgt_vdev *vdev = container_of(nb,
					       struct kvmgt_vdev,
					       group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vdev->kvm = data;

		if (!data)
			schedule_work(&vdev->release_work);
	}

	return NOTIFY_OK;
}

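/* Open path: register notifiers, grab the vfio group, then initialize the guest. */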
static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned long events;
	int ret;
	struct vfio_group *vfio_group;

	vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vdev->iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vdev->group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
	if (IS_ERR_OR_NULL(vfio_group)) {
		ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
		gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
		goto undo_register;
	}
	vdev->vfio_group = vfio_group;

	/* Take a module reference as mdev core doesn't take
	 * a reference for vendor driver.
	 */
	if (!try_module_get(THIS_MODULE)) {
		ret = -ENODEV;
		goto undo_group;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vdev->released, 0);
	return ret;

undo_group:
	vfio_group_put_external_user(vdev->vfio_group);
	vdev->vfio_group = NULL;

undo_register:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vdev->group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vdev->iommu_notifier);
out:
	return ret;
}

static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct eventfd_ctx *trigger;

	trigger = vdev->msi_trigger;
	if (trigger) {
		eventfd_ctx_put(trigger);
		vdev->msi_trigger = NULL;
	}
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vdev->released, 0, 1))
		return;

	intel_gvt_ops->vgpu_release(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
					&vdev->iommu_notifier);
	drm_WARN(&i915->drm, ret,
		 "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
					&vdev->group_notifier);
	drm_WARN(&i915->drm, ret,
		 "vfio_unregister_notifier for group failed: %d\n", ret);

	/* dereference module reference taken at open */
	module_put(THIS_MODULE);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	intel_vgpu_release_msi_eventfd_ctx(vgpu);
	vfio_group_put_external_user(vdev->vfio_group);

	vdev->kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
					       release_work);

	__intel_vgpu_release(vdev->vgpu);
}

static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
			     void *buf, unsigned int count, bool is_write)
{
	u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
		void *buf, unsigned long count, bool is_write)
{
	void __iomem *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO;

	if (is_write)
		memcpy_toio(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy_fromio(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}

static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vgpu, buf, count,
				ppos, is_write);
	}

	return ret == 0 ? count : ret;
}

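/*
 * Check whether a BAR0 access falls inside the GGTT range, where
 * intel_vgpu_read()/intel_vgpu_write() keep 8-byte accesses whole so a
 * GGTT entry is updated in one operation.
 */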
static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct intel_gvt *gvt = vgpu->gvt;
	int offset;

	/* Only allow MMIO GGTT entry access */
	if (index != PCI_BASE_ADDRESS_0)
		return false;

	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return (offset >= gvt->device_info.gtt_start_offset &&
		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
			true : false;
}

static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support GGTT entry 8 bytes read */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(mdev, ppos)) {
			u64 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support GGTT entry 8 bytes write */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(mdev, ppos)) {
			u64 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

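/* Validate and remap the vGPU's portion of the BAR2 aperture into userspace. */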
static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff, req_start;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (!intel_vgpu_in_aperture(vgpu, req_start))
		return -EINVAL;
	if (req_start + req_size >
	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
		return -EINVAL;

	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		kvmgt_vdev(vgpu)->msi_trigger = trigger;
	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
		intel_vgpu_release_msi_eventfd_ctx(vgpu);

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, u32 flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

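/* Top-level VFIO ioctl dispatcher for the vGPU mdev device. */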
1345 | static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, | |
1346 | unsigned long arg) | |
1347 | { | |
1348 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); | |
06d63c48 | 1349 | struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
659643f7 JS |
1350 | unsigned long minsz; |
1351 | ||
1352 | gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); | |
1353 | ||
1354 | if (cmd == VFIO_DEVICE_GET_INFO) { | |
1355 | struct vfio_device_info info; | |
1356 | ||
1357 | minsz = offsetofend(struct vfio_device_info, num_irqs); | |
1358 | ||
1359 | if (copy_from_user(&info, (void __user *)arg, minsz)) | |
1360 | return -EFAULT; | |
1361 | ||
1362 | if (info.argsz < minsz) | |
1363 | return -EINVAL; | |
1364 | ||
1365 | info.flags = VFIO_DEVICE_FLAGS_PCI; | |
1366 | info.flags |= VFIO_DEVICE_FLAGS_RESET; | |
b851adea | 1367 | info.num_regions = VFIO_PCI_NUM_REGIONS + |
06d63c48 | 1368 | vdev->num_regions; |
659643f7 JS |
1369 | info.num_irqs = VFIO_PCI_NUM_IRQS; |
1370 | ||
1371 | return copy_to_user((void __user *)arg, &info, minsz) ? | |
1372 | -EFAULT : 0; | |
1373 | ||
1374 | } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { | |
1375 | struct vfio_region_info info; | |
1376 | struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; | |
de5372da GS |
1377 | unsigned int i; |
1378 | int ret; | |
659643f7 | 1379 | struct vfio_region_info_cap_sparse_mmap *sparse = NULL; |
659643f7 JS |
1380 | int nr_areas = 1; |
1381 | int cap_type_id; | |
1382 | ||
1383 | minsz = offsetofend(struct vfio_region_info, offset); | |
1384 | ||
1385 | if (copy_from_user(&info, (void __user *)arg, minsz)) | |
1386 | return -EFAULT; | |
1387 | ||
1388 | if (info.argsz < minsz) | |
1389 | return -EINVAL; | |
1390 | ||
1391 | switch (info.index) { | |
1392 | case VFIO_PCI_CONFIG_REGION_INDEX: | |
1393 | info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); | |
02d578e5 | 1394 | info.size = vgpu->gvt->device_info.cfg_space_size; |
659643f7 JS |
1395 | info.flags = VFIO_REGION_INFO_FLAG_READ | |
1396 | VFIO_REGION_INFO_FLAG_WRITE; | |
1397 | break; | |
1398 | case VFIO_PCI_BAR0_REGION_INDEX: | |
1399 | info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); | |
1400 | info.size = vgpu->cfg_space.bar[info.index].size; | |
1401 | if (!info.size) { | |
1402 | info.flags = 0; | |
1403 | break; | |
1404 | } | |
1405 | ||
1406 | info.flags = VFIO_REGION_INFO_FLAG_READ | | |
1407 | VFIO_REGION_INFO_FLAG_WRITE; | |
1408 | break; | |
1409 | case VFIO_PCI_BAR1_REGION_INDEX: | |
1410 | info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); | |
1411 | info.size = 0; | |
1412 | info.flags = 0; | |
1413 | break; | |
1414 | case VFIO_PCI_BAR2_REGION_INDEX: | |
1415 | info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); | |
1416 | info.flags = VFIO_REGION_INFO_FLAG_CAPS | | |
1417 | VFIO_REGION_INFO_FLAG_MMAP | | |
1418 | VFIO_REGION_INFO_FLAG_READ | | |
1419 | VFIO_REGION_INFO_FLAG_WRITE; | |
1420 | info.size = gvt_aperture_sz(vgpu->gvt); | |
1421 | ||
cd3e0583 GS |
1422 | sparse = kzalloc(struct_size(sparse, areas, nr_areas), |
1423 | GFP_KERNEL); | |
659643f7 JS |
1424 | if (!sparse) |
1425 | return -ENOMEM; | |
1426 | ||
dda01f78 AW |
1427 | sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; |
1428 | sparse->header.version = 1; | |
659643f7 JS |
1429 | sparse->nr_areas = nr_areas; |
1430 | cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; | |
1431 | sparse->areas[0].offset = | |
1432 | PAGE_ALIGN(vgpu_aperture_offset(vgpu)); | |
1433 | sparse->areas[0].size = vgpu_aperture_sz(vgpu); | |
659643f7 JS |
1434 | break; |
1435 | ||
1436 | case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: | |
1437 | info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); | |
1438 | info.size = 0; | |
659643f7 | 1439 | info.flags = 0; |
072ec93d | 1440 | |
659643f7 JS |
1441 | gvt_dbg_core("get region info bar:%d\n", info.index); |
1442 | break; | |
1443 | ||
1444 | case VFIO_PCI_ROM_REGION_INDEX: | |
1445 | case VFIO_PCI_VGA_REGION_INDEX: | |
072ec93d PZ |
1446 | info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); |
1447 | info.size = 0; | |
1448 | info.flags = 0; | |
1449 | ||
659643f7 JS |
1450 | gvt_dbg_core("get region info index:%d\n", info.index); |
1451 | break; | |
1452 | default: | |
1453 | { | |
dda01f78 AW |
1454 | struct vfio_region_info_cap_type cap_type = { |
1455 | .header.id = VFIO_REGION_INFO_CAP_TYPE, | |
1456 | .header.version = 1 }; | |
659643f7 JS |
1457 | |
1458 | if (info.index >= VFIO_PCI_NUM_REGIONS + | |
06d63c48 | 1459 | vdev->num_regions) |
659643f7 | 1460 | return -EINVAL; |
de5372da GS |
1461 | info.index = |
1462 | array_index_nospec(info.index, | |
1463 | VFIO_PCI_NUM_REGIONS + | |
06d63c48 | 1464 | vdev->num_regions); |
659643f7 JS |
1465 | |
1466 | i = info.index - VFIO_PCI_NUM_REGIONS; | |
1467 | ||
1468 | info.offset = | |
1469 | VFIO_PCI_INDEX_TO_OFFSET(info.index); | |
06d63c48 JS |
1470 | info.size = vdev->region[i].size; |
1471 | info.flags = vdev->region[i].flags; | |
659643f7 | 1472 | |
06d63c48 JS |
1473 | cap_type.type = vdev->region[i].type; |
1474 | cap_type.subtype = vdev->region[i].subtype; | |
659643f7 JS |
1475 | |
1476 | ret = vfio_info_add_capability(&caps, | |
dda01f78 AW |
1477 | &cap_type.header, |
1478 | sizeof(cap_type)); | |
659643f7 JS |
1479 | if (ret) |
1480 | return ret; | |
1481 | } | |
1482 | } | |
1483 | ||
1484 | if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) { | |
1485 | switch (cap_type_id) { | |
1486 | case VFIO_REGION_INFO_CAP_SPARSE_MMAP: | |
1487 | ret = vfio_info_add_capability(&caps, | |
cd3e0583 GS |
1488 | &sparse->header, |
1489 | struct_size(sparse, areas, | |
1490 | sparse->nr_areas)); | |
7590ebb8 YW |
1491 | if (ret) { |
1492 | kfree(sparse); | |
659643f7 | 1493 | return ret; |
7590ebb8 | 1494 | } |
659643f7 JS |
1495 | break; |
1496 | default: | |
7590ebb8 | 1497 | kfree(sparse); |
659643f7 JS |
1498 | return -EINVAL; |
1499 | } | |
1500 | } | |
1501 | ||
1502 | if (caps.size) { | |
b851adea | 1503 | info.flags |= VFIO_REGION_INFO_FLAG_CAPS; |
659643f7 JS |
1504 | if (info.argsz < sizeof(info) + caps.size) { |
1505 | info.argsz = sizeof(info) + caps.size; | |
1506 | info.cap_offset = 0; | |
1507 | } else { | |
1508 | vfio_info_cap_shift(&caps, sizeof(info)); | |
1509 | if (copy_to_user((void __user *)arg + | |
1510 | sizeof(info), caps.buf, | |
1511 | caps.size)) { | |
1512 | kfree(caps.buf); | |
7590ebb8 | 1513 | kfree(sparse); |
659643f7 JS |
1514 | return -EFAULT; |
1515 | } | |
1516 | info.cap_offset = sizeof(info); | |
1517 | } | |
1518 | ||
1519 | kfree(caps.buf); | |
1520 | } | |
1521 | ||
7590ebb8 | 1522 | kfree(sparse); |
659643f7 JS |
1523 | return copy_to_user((void __user *)arg, &info, minsz) ? |
1524 | -EFAULT : 0; | |
1525 | } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { | |
1526 | struct vfio_irq_info info; | |
1527 | ||
1528 | minsz = offsetofend(struct vfio_irq_info, count); | |
1529 | ||
1530 | if (copy_from_user(&info, (void __user *)arg, minsz)) | |
1531 | return -EFAULT; | |
1532 | ||
1533 | if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS) | |
1534 | return -EINVAL; | |
1535 | ||
1536 | switch (info.index) { | |
1537 | case VFIO_PCI_INTX_IRQ_INDEX: | |
1538 | case VFIO_PCI_MSI_IRQ_INDEX: | |
1539 | break; | |
1540 | default: | |
1541 | return -EINVAL; | |
1542 | } | |
1543 | ||
1544 | info.flags = VFIO_IRQ_INFO_EVENTFD; | |
1545 | ||
1546 | info.count = intel_vgpu_get_irq_count(vgpu, info.index); | |
1547 | ||
1548 | if (info.index == VFIO_PCI_INTX_IRQ_INDEX) | |
1549 | info.flags |= (VFIO_IRQ_INFO_MASKABLE | | |
1550 | VFIO_IRQ_INFO_AUTOMASKED); | |
1551 | else | |
1552 | info.flags |= VFIO_IRQ_INFO_NORESIZE; | |
1553 | ||
1554 | return copy_to_user((void __user *)arg, &info, minsz) ? | |
1555 | -EFAULT : 0; | |
1556 | } else if (cmd == VFIO_DEVICE_SET_IRQS) { | |
1557 | struct vfio_irq_set hdr; | |
1558 | u8 *data = NULL; | |
1559 | int ret = 0; | |
1560 | size_t data_size = 0; | |
1561 | ||
1562 | minsz = offsetofend(struct vfio_irq_set, count); | |
1563 | ||
1564 | if (copy_from_user(&hdr, (void __user *)arg, minsz)) | |
1565 | return -EFAULT; | |
1566 | ||
1567 | if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { | |
1568 | int max = intel_vgpu_get_irq_count(vgpu, hdr.index); | |
1569 | ||
1570 | ret = vfio_set_irqs_validate_and_prepare(&hdr, max, | |
1571 | VFIO_PCI_NUM_IRQS, &data_size); | |
1572 | if (ret) { | |
695fbc08 | 1573 | gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); |
659643f7 JS |
1574 | return -EINVAL; |
1575 | } | |
1576 | if (data_size) { | |
1577 | data = memdup_user((void __user *)(arg + minsz), | |
1578 | data_size); | |
1579 | if (IS_ERR(data)) | |
1580 | return PTR_ERR(data); | |
1581 | } | |
1582 | } | |
1583 | ||
1584 | ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index, | |
1585 | hdr.start, hdr.count, data); | |
1586 | kfree(data); | |
1587 | ||
1588 | return ret; | |
1589 | } else if (cmd == VFIO_DEVICE_RESET) { | |
1590 | intel_gvt_ops->vgpu_reset(vgpu); | |
1591 | return 0; | |
e546e281 TZ |
1592 | } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) { |
1593 | struct vfio_device_gfx_plane_info dmabuf; | |
1594 | int ret = 0; | |
1595 | ||
1596 | minsz = offsetofend(struct vfio_device_gfx_plane_info, | |
1597 | dmabuf_id); | |
1598 | if (copy_from_user(&dmabuf, (void __user *)arg, minsz)) | |
1599 | return -EFAULT; | |
1600 | if (dmabuf.argsz < minsz) | |
1601 | return -EINVAL; | |
1602 | ||
1603 | ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf); | |
1604 | if (ret != 0) | |
1605 | return ret; | |
1606 | ||
1607 | return copy_to_user((void __user *)arg, &dmabuf, minsz) ? | |
1608 | -EFAULT : 0; | |
1609 | } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) { | |
1610 | __u32 dmabuf_id; | |
1611 | __s32 dmabuf_fd; | |
1612 | ||
1613 | if (get_user(dmabuf_id, (__u32 __user *)arg)) | |
1614 | return -EFAULT; | |
1615 | ||
1616 | dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id); | |
1617 | return dmabuf_fd; | |
1618 | ||
659643f7 JS |
1619 | } |
1620 | ||
9f591ae6 | 1621 | return -ENOTTY; |
659643f7 JS |
1622 | } |
1623 | ||
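/* sysfs attribute (intel_vgpu/vgpu_id) exposing this vGPU's numeric id. */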
7a7a6561 ZW |
1624 | static ssize_t |
1625 | vgpu_id_show(struct device *dev, struct device_attribute *attr, | |
1626 | char *buf) | |
1627 | { | |
1628 | struct mdev_device *mdev = mdev_from_dev(dev); | |
1629 | ||
1630 | if (mdev) { | |
1631 | struct intel_vgpu *vgpu = (struct intel_vgpu *) | |
1632 | mdev_get_drvdata(mdev); | |
1633 | return sprintf(buf, "%d\n", vgpu->id); | |
1634 | } | |
1635 | return sprintf(buf, "\n"); | |
1636 | } | |
1637 | ||
1638 | static DEVICE_ATTR_RO(vgpu_id); | |
1639 | ||
1640 | static struct attribute *intel_vgpu_attrs[] = { | |
1641 | &dev_attr_vgpu_id.attr, | |
1642 | NULL | |
1643 | }; | |
1644 | ||
1645 | static const struct attribute_group intel_vgpu_group = { | |
1646 | .name = "intel_vgpu", | |
1647 | .attrs = intel_vgpu_attrs, | |
1648 | }; | |
1649 | ||
1650 | static const struct attribute_group *intel_vgpu_groups[] = { | |
1651 | &intel_vgpu_group, | |
1652 | NULL, | |
1653 | }; | |
1654 | ||
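/*
 * mdev parent ops: the entry points the mdev core invokes for vGPU
 * lifecycle (create/remove), open/release, and the file operations
 * (read/write/mmap/ioctl) backing the VFIO device.
 * supported_type_groups is filled in at host-init time, once the
 * vGPU type attributes have been fetched from GVT core.
 */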
6aa23ced | 1655 | static struct mdev_parent_ops intel_vgpu_ops = { |
7a7a6561 | 1656 | .mdev_attr_groups = intel_vgpu_groups, |
659643f7 JS |
1657 | .create = intel_vgpu_create, |
1658 | .remove = intel_vgpu_remove, | |
1659 | ||
1660 | .open = intel_vgpu_open, | |
1661 | .release = intel_vgpu_release, | |
1662 | ||
1663 | .read = intel_vgpu_read, | |
1664 | .write = intel_vgpu_write, | |
1665 | .mmap = intel_vgpu_mmap, | |
1666 | .ioctl = intel_vgpu_ioctl, | |
1667 | }; | |
1668 | ||
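/*
 * kvmgt_host_init - register the physical GPU as an mdev parent
 * device. The supported vGPU type groups are queried from GVT core
 * and wired into intel_vgpu_ops before registration, so userspace
 * can enumerate them under sysfs.
 */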
f30437c5 JS |
1669 | static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) |
1670 | { | |
6aa23ced | 1671 | struct attribute_group **kvm_vgpu_type_groups; |
f30437c5 JS |
1672 | |
1673 | intel_gvt_ops = ops; | |
c5bd8535 | 1674 | if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups)) |
6aa23ced | 1675 | return -EFAULT; |
1676 | intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups; | |
f30437c5 | 1677 | |
659643f7 | 1678 | return mdev_register_device(dev, &intel_vgpu_ops); |
f30437c5 JS |
1679 | } |
1680 | ||
a2b8419a | 1681 | static void kvmgt_host_exit(struct device *dev) |
f30437c5 | 1682 | { |
659643f7 | 1683 | mdev_unregister_device(dev); |
f30437c5 JS |
1684 | } |
1685 | ||
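/*
 * kvmgt_page_track_add - write-protect a guest page. The memslot for
 * @gfn is looked up under SRCU; under mmu_lock the page is added to
 * KVM's page-track facility (unless already tracked), so that guest
 * writes to it are forwarded to kvmgt_page_track_write() below.
 */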
f66e5ff7 | 1686 | static int kvmgt_page_track_add(unsigned long handle, u64 gfn) |
f30437c5 | 1687 | { |
659643f7 JS |
1688 | struct kvmgt_guest_info *info; |
1689 | struct kvm *kvm; | |
f30437c5 JS |
1690 | struct kvm_memory_slot *slot; |
1691 | int idx; | |
1692 | ||
659643f7 JS |
1693 | if (!handle_valid(handle)) |
1694 | return -ESRCH; | |
1695 | ||
1696 | info = (struct kvmgt_guest_info *)handle; | |
1697 | kvm = info->kvm; | |
1698 | ||
f30437c5 JS |
1699 | idx = srcu_read_lock(&kvm->srcu); |
1700 | slot = gfn_to_memslot(kvm, gfn); | |
faaaa53b JS |
1701 | if (!slot) { |
1702 | srcu_read_unlock(&kvm->srcu, idx); | |
1703 | return -EINVAL; | |
1704 | } | |
f30437c5 JS |
1705 | |
1706 | spin_lock(&kvm->mmu_lock); | |
1707 | ||
1708 | if (kvmgt_gfn_is_write_protected(info, gfn)) | |
1709 | goto out; | |
1710 | ||
1711 | kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); | |
1712 | kvmgt_protect_table_add(info, gfn); | |
1713 | ||
1714 | out: | |
1715 | spin_unlock(&kvm->mmu_lock); | |
1716 | srcu_read_unlock(&kvm->srcu, idx); | |
1717 | return 0; | |
1718 | } | |
1719 | ||
f66e5ff7 | 1720 | static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) |
f30437c5 | 1721 | { |
659643f7 JS |
1722 | struct kvmgt_guest_info *info; |
1723 | struct kvm *kvm; | |
f30437c5 JS |
1724 | struct kvm_memory_slot *slot; |
1725 | int idx; | |
1726 | ||
659643f7 JS |
1727 | if (!handle_valid(handle)) |
1728 | return 0; | |
1729 | ||
1730 | info = (struct kvmgt_guest_info *)handle; | |
1731 | kvm = info->kvm; | |
1732 | ||
f30437c5 JS |
1733 | idx = srcu_read_lock(&kvm->srcu); |
1734 | slot = gfn_to_memslot(kvm, gfn); | |
faaaa53b JS |
1735 | if (!slot) { |
1736 | srcu_read_unlock(&kvm->srcu, idx); | |
1737 | return -EINVAL; | |
1738 | } | |
f30437c5 JS |
1739 | |
1740 | spin_lock(&kvm->mmu_lock); | |
1741 | ||
1742 | if (!kvmgt_gfn_is_write_protected(info, gfn)) | |
1743 | goto out; | |
1744 | ||
1745 | kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE); | |
1746 | kvmgt_protect_table_del(info, gfn); | |
1747 | ||
1748 | out: | |
1749 | spin_unlock(&kvm->mmu_lock); | |
1750 | srcu_read_unlock(&kvm->srcu, idx); | |
1751 | return 0; | |
1752 | } | |
1753 | ||
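/*
 * Notifier callbacks registered with KVM's page-track facility:
 * track_write forwards guest writes to tracked pages to the GVT
 * write-protect handler (e.g. to keep shadowed GPU page tables in
 * sync), while track_flush_slot drops write protection for every
 * tracked page in a memslot that KVM is about to flush.
 */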
1754 | static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |
1755 | const u8 *val, int len, | |
1756 | struct kvm_page_track_notifier_node *node) | |
1757 | { | |
1758 | struct kvmgt_guest_info *info = container_of(node, | |
1759 | struct kvmgt_guest_info, track_node); | |
1760 | ||
1761 | if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) | |
4fafba2d ZW |
1762 | intel_gvt_ops->write_protect_handler(info->vgpu, gpa, |
1763 | (void *)val, len); | |
f30437c5 JS |
1764 | } |
1765 | ||
1766 | static void kvmgt_page_track_flush_slot(struct kvm *kvm, | |
1767 | struct kvm_memory_slot *slot, | |
1768 | struct kvm_page_track_notifier_node *node) | |
1769 | { | |
1770 | int i; | |
1771 | gfn_t gfn; | |
1772 | struct kvmgt_guest_info *info = container_of(node, | |
1773 | struct kvmgt_guest_info, track_node); | |
1774 | ||
1775 | spin_lock(&kvm->mmu_lock); | |
1776 | for (i = 0; i < slot->npages; i++) { | |
1777 | gfn = slot->base_gfn + i; | |
1778 | if (kvmgt_gfn_is_write_protected(info, gfn)) { | |
1779 | kvm_slot_page_track_remove_page(kvm, slot, gfn, | |
1780 | KVM_PAGE_TRACK_WRITE); | |
1781 | kvmgt_protect_table_del(info, gfn); | |
1782 | } | |
1783 | } | |
1784 | spin_unlock(&kvm->mmu_lock); | |
1785 | } | |
1786 | ||
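/* Return true if some active vGPU is already attached to @kvm. */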
659643f7 JS |
1787 | static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) |
1788 | { | |
1789 | struct intel_vgpu *itr; | |
1790 | struct kvmgt_guest_info *info; | |
1791 | int id; | |
1792 | bool ret = false; | |
1793 | ||
1794 | mutex_lock(&vgpu->gvt->lock); | |
1795 | for_each_active_vgpu(vgpu->gvt, itr, id) { | |
1796 | if (!handle_valid(itr->handle)) | |
1797 | continue; | |
1798 | ||
1799 | info = (struct kvmgt_guest_info *)itr->handle; | |
1800 | if (kvm && kvm == info->kvm) { | |
1801 | ret = true; | |
1802 | goto out; | |
1803 | } | |
1804 | } | |
1805 | out: | |
1806 | mutex_unlock(&vgpu->gvt->lock); | |
1807 | return ret; | |
1808 | } | |
1809 | ||
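/*
 * kvmgt_guest_init - bind a vGPU to the KVM instance of the current
 * process. Fails if the vGPU is already initialized or if the KVM
 * instance already backs another vGPU; otherwise it takes a
 * reference on the kvm struct, initializes the protect table and DMA
 * cache, registers the page-track notifier, and exposes the DMA
 * cache entry count in debugfs.
 */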
1810 | static int kvmgt_guest_init(struct mdev_device *mdev) | |
1811 | { | |
1812 | struct kvmgt_guest_info *info; | |
1813 | struct intel_vgpu *vgpu; | |
06d63c48 | 1814 | struct kvmgt_vdev *vdev; |
659643f7 JS |
1815 | struct kvm *kvm; |
1816 | ||
1817 | vgpu = mdev_get_drvdata(mdev); | |
1818 | if (handle_valid(vgpu->handle)) | |
1819 | return -EEXIST; | |
1820 | ||
06d63c48 JS |
1821 | vdev = kvmgt_vdev(vgpu); |
1822 | kvm = vdev->kvm; | |
659643f7 | 1823 | if (!kvm || kvm->mm != current->mm) { |
695fbc08 | 1824 | gvt_vgpu_err("KVM is required to use Intel vGPU\n"); |
659643f7 JS |
1825 | return -ESRCH; |
1826 | } | |
1827 | ||
1828 | if (__kvmgt_vgpu_exist(vgpu, kvm)) | |
1829 | return -EEXIST; | |
1830 | ||
1831 | info = vzalloc(sizeof(struct kvmgt_guest_info)); | |
1832 | if (!info) | |
1833 | return -ENOMEM; | |
1834 | ||
1835 | vgpu->handle = (unsigned long)info; | |
1836 | info->vgpu = vgpu; | |
1837 | info->kvm = kvm; | |
93a15b58 | 1838 | kvm_get_kvm(info->kvm); |
659643f7 JS |
1839 | |
1840 | kvmgt_protect_table_init(info); | |
1841 | gvt_cache_init(vgpu); | |
1842 | ||
1843 | info->track_node.track_write = kvmgt_page_track_write; | |
1844 | info->track_node.track_flush_slot = kvmgt_page_track_flush_slot; | |
1845 | kvm_page_track_register_notifier(kvm, &info->track_node); | |
1846 | ||
6846dfeb CD |
1847 | info->debugfs_cache_entries = debugfs_create_ulong( |
1848 | "kvmgt_nr_cache_entries", | |
1849 | 0444, vgpu->debugfs, | |
06d63c48 | 1850 | &vdev->nr_cache_entries); |
659643f7 JS |
1851 | return 0; |
1852 | } | |
1853 | ||
1854 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) | |
1855 | { | |
6846dfeb CD |
1856 | debugfs_remove(info->debugfs_cache_entries); |
1857 | ||
659643f7 | 1858 | kvm_page_track_unregister_notifier(info->kvm, &info->track_node); |
93a15b58 | 1859 | kvm_put_kvm(info->kvm); |
659643f7 | 1860 | kvmgt_protect_table_destroy(info); |
8ff842fd | 1861 | gvt_cache_destroy(info->vgpu); |
659643f7 JS |
1862 | vfree(info); |
1863 | ||
1864 | return true; | |
1865 | } | |
1866 | ||
06d63c48 | 1867 | static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle) |
f30437c5 | 1868 | { |
06d63c48 JS |
1869 | struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; |
1870 | ||
1871 | vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL); | |
1872 | ||
1873 | if (!vgpu->vdev) | |
1874 | return -ENOMEM; | |
1875 | ||
1876 | kvmgt_vdev(vgpu)->vgpu = vgpu; | |
1877 | ||
f30437c5 JS |
1878 | return 0; |
1879 | } | |
1880 | ||
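/*
 * kvmgt_detach_vgpu - tear down the kvmgt private data: release any
 * extra VFIO regions registered for the vGPU (e.g. opregion/EDID),
 * then free the kvmgt_vdev itself.
 */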
6c2d0f99 | 1881 | static void kvmgt_detach_vgpu(void *p_vgpu) |
f30437c5 | 1882 | { |
6c2d0f99 HY |
1883 | int i; |
1884 | struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; | |
06d63c48 | 1885 | struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
6c2d0f99 | 1886 | |
06d63c48 | 1887 | if (!vdev->region) |
6c2d0f99 HY |
1888 | return; |
1889 | ||
06d63c48 JS |
1890 | for (i = 0; i < vdev->num_regions; i++) |
1891 | if (vdev->region[i].ops->release) | |
1892 | vdev->region[i].ops->release(vgpu, | |
1893 | &vdev->region[i]); | |
1894 | vdev->num_regions = 0; | |
1895 | kfree(vdev->region); | |
1896 | vdev->region = NULL; | |
1897 | ||
1898 | kfree(vdev); | |
f30437c5 JS |
1899 | } |
1900 | ||
1901 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) | |
1902 | { | |
659643f7 JS |
1903 | struct kvmgt_guest_info *info; |
1904 | struct intel_vgpu *vgpu; | |
06d63c48 | 1905 | struct kvmgt_vdev *vdev; |
f30437c5 | 1906 | |
659643f7 JS |
1907 | if (!handle_valid(handle)) |
1908 | return -ESRCH; | |
f30437c5 | 1909 | |
659643f7 JS |
1910 | info = (struct kvmgt_guest_info *)handle; |
1911 | vgpu = info->vgpu; | |
06d63c48 | 1912 | vdev = kvmgt_vdev(vgpu); |
659643f7 | 1913 | |
d54e7934 XZ |
1914 | /*
1915 | * When the guest powers off, msi_trigger is set to NULL, but the
1916 | * vgpu's config space and MMIO registers are not restored to their
1917 | * defaults. If this vgpu is then reused by another VM, its pipes
1918 | * may still be enabled, so once the vgpu becomes active it will
1919 | * receive vblank interrupt requests. However, msi_trigger stays
1920 | * NULL until the guest enables MSI, so if msi_trigger is NULL,
1921 | * return success without injecting an interrupt into the guest. |
1922 | */ |
06d63c48 | 1923 | if (vdev->msi_trigger == NULL) |
d54e7934 XZ |
1924 | return 0; |
1925 | ||
06d63c48 | 1926 | if (eventfd_signal(vdev->msi_trigger, 1) == 1) |
659643f7 JS |
1927 | return 0; |
1928 | ||
1929 | return -EFAULT; | |
f30437c5 JS |
1930 | } |
1931 | ||
1932 | static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | |
1933 | { | |
659643f7 | 1934 | struct kvmgt_guest_info *info; |
cf4ee73f | 1935 | kvm_pfn_t pfn; |
f30437c5 | 1936 | |
659643f7 JS |
1937 | if (!handle_valid(handle)) |
1938 | return INTEL_GVT_INVALID_ADDR; | |
1939 | ||
1940 | info = (struct kvmgt_guest_info *)handle; | |
cf4ee73f CD |
1941 | |
1942 | pfn = gfn_to_pfn(info->kvm, gfn); | |
1943 | if (is_error_noslot_pfn(pfn)) | |
4a0b3444 | 1944 | return INTEL_GVT_INVALID_ADDR; |
cf4ee73f CD |
1945 | |
1946 | return pfn; | |
1947 | } | |
1948 | ||
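/*
 * kvmgt_dma_map_guest_page - map a guest page range for DMA, with
 * caching. A cache hit of the same size just bumps the entry's
 * refcount; a hit with a different size is unmapped and re-mapped.
 * Mappings are dropped via kvmgt_dma_unmap_guest_page() once the
 * last reference goes away.
 */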
63ef2623 | 1949 | static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, |
79e542f5 | 1950 | unsigned long size, dma_addr_t *dma_addr) |
cf4ee73f | 1951 | { |
cf4ee73f | 1952 | struct intel_vgpu *vgpu; |
06d63c48 | 1953 | struct kvmgt_vdev *vdev; |
cf4ee73f CD |
1954 | struct gvt_dma *entry; |
1955 | int ret; | |
1956 | ||
1957 | if (!handle_valid(handle)) | |
1958 | return -EINVAL; | |
1959 | ||
06d63c48 JS |
1960 | vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; |
1961 | vdev = kvmgt_vdev(vgpu); | |
cf4ee73f | 1962 | |
06d63c48 | 1963 | mutex_lock(&vdev->cache_lock); |
cf4ee73f | 1964 | |
06d63c48 | 1965 | entry = __gvt_cache_find_gfn(vgpu, gfn); |
cf4ee73f | 1966 | if (!entry) { |
7366aeb7 XZ |
1967 | ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); |
1968 | if (ret) | |
1969 | goto err_unlock; | |
1970 | ||
06d63c48 | 1971 | ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); |
7366aeb7 XZ |
1972 | if (ret) |
1973 | goto err_unmap; | |
1974 | } else if (entry->size != size) { | |
1975 | /* the same gfn with different size: unmap and re-map */ | |
1976 | gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size); | |
1977 | __gvt_cache_remove_entry(vgpu, entry); | |
1978 | ||
79e542f5 | 1979 | ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); |
5cd4223e CD |
1980 | if (ret) |
1981 | goto err_unlock; | |
1982 | ||
06d63c48 | 1983 | ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); |
5cd4223e CD |
1984 | if (ret) |
1985 | goto err_unmap; | |
cf4ee73f CD |
1986 | } else { |
1987 | kref_get(&entry->ref); | |
1988 | *dma_addr = entry->dma_addr; | |
4a0b3444 | 1989 | } |
f30437c5 | 1990 | |
06d63c48 | 1991 | mutex_unlock(&vdev->cache_lock); |
cf4ee73f | 1992 | return 0; |
5cd4223e CD |
1993 | |
1994 | err_unmap: | |
79e542f5 | 1995 | gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); |
5cd4223e | 1996 | err_unlock: |
06d63c48 | 1997 | mutex_unlock(&vdev->cache_lock); |
5cd4223e | 1998 | return ret; |
cf4ee73f CD |
1999 | } |
2000 | ||
9f674c81 TZ |
2001 | static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) |
2002 | { | |
2003 | struct kvmgt_guest_info *info; | |
06d63c48 | 2004 | struct kvmgt_vdev *vdev; |
9f674c81 TZ |
2005 | struct gvt_dma *entry; |
2006 | int ret = 0; | |
2007 | ||
2008 | if (!handle_valid(handle)) | |
2009 | return -ENODEV; | |
2010 | ||
2011 | info = (struct kvmgt_guest_info *)handle; | |
06d63c48 | 2012 | vdev = kvmgt_vdev(info->vgpu); |
9f674c81 | 2013 | |
06d63c48 | 2014 | mutex_lock(&vdev->cache_lock); |
9f674c81 TZ |
2015 | entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); |
2016 | if (entry) | |
2017 | kref_get(&entry->ref); | |
2018 | else | |
2019 | ret = -ENOMEM; | |
06d63c48 | 2020 | mutex_unlock(&vdev->cache_lock); |
9f674c81 TZ |
2021 | |
2022 | return ret; | |
2023 | } | |
2024 | ||
cf4ee73f CD |
2025 | static void __gvt_dma_release(struct kref *ref) |
2026 | { | |
2027 | struct gvt_dma *entry = container_of(ref, typeof(*entry), ref); | |
2028 | ||
79e542f5 CD |
2029 | gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr, |
2030 | entry->size); | |
cf4ee73f CD |
2031 | __gvt_cache_remove_entry(entry->vgpu, entry); |
2032 | } | |
2033 | ||
63ef2623 | 2034 | static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) |
cf4ee73f | 2035 | { |
06d63c48 JS |
2036 | struct intel_vgpu *vgpu; |
2037 | struct kvmgt_vdev *vdev; | |
cf4ee73f CD |
2038 | struct gvt_dma *entry; |
2039 | ||
2040 | if (!handle_valid(handle)) | |
2041 | return; | |
2042 | ||
06d63c48 JS |
2043 | vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; |
2044 | vdev = kvmgt_vdev(vgpu); | |
cf4ee73f | 2045 | |
06d63c48 JS |
2046 | mutex_lock(&vdev->cache_lock); |
2047 | entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); | |
cf4ee73f CD |
2048 | if (entry) |
2049 | kref_put(&entry->ref, __gvt_dma_release); | |
06d63c48 | 2050 | mutex_unlock(&vdev->cache_lock); |
f30437c5 JS |
2051 | } |
2052 | ||
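/*
 * kvmgt_rw_gpa - read or write guest physical memory through the
 * VFIO group's DMA r/w interface; the read_gpa/write_gpa hooks below
 * are thin wrappers around it.
 */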
f30437c5 JS |
2053 | static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, |
2054 | void *buf, unsigned long len, bool write) | |
2055 | { | |
f440c8a5 | 2056 | struct kvmgt_guest_info *info; |
f30437c5 | 2057 | |
659643f7 JS |
2058 | if (!handle_valid(handle)) |
2059 | return -ESRCH; | |
2060 | ||
f440c8a5 | 2061 | info = (struct kvmgt_guest_info *)handle; |
f30437c5 | 2062 | |
b59b2a3e YZ |
2063 | return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group, |
2064 | gpa, buf, len, write); | |
f30437c5 JS |
2065 | } |
2066 | ||
2067 | static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, | |
2068 | void *buf, unsigned long len) | |
2069 | { | |
2070 | return kvmgt_rw_gpa(handle, gpa, buf, len, false); | |
2071 | } | |
2072 | ||
2073 | static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, | |
2074 | void *buf, unsigned long len) | |
2075 | { | |
2076 | return kvmgt_rw_gpa(handle, gpa, buf, len, true); | |
2077 | } | |
2078 | ||
2079 | static unsigned long kvmgt_virt_to_pfn(void *addr) | |
2080 | { | |
2081 | return PFN_DOWN(__pa(addr)); | |
2082 | } | |
2083 | ||
cc753fbe HY |
2084 | static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) |
2085 | { | |
2086 | struct kvmgt_guest_info *info; | |
2087 | struct kvm *kvm; | |
a1ac5f09 WL |
2088 | int idx; |
2089 | bool ret; | |
cc753fbe HY |
2090 | |
2091 | if (!handle_valid(handle)) | |
2092 | return false; | |
2093 | ||
2094 | info = (struct kvmgt_guest_info *)handle; | |
2095 | kvm = info->kvm; | |
2096 | ||
a1ac5f09 WL |
2097 | idx = srcu_read_lock(&kvm->srcu); |
2098 | ret = kvm_is_visible_gfn(kvm, gfn); | |
2099 | srcu_read_unlock(&kvm->srcu, idx); | |
cc753fbe | 2100 | |
a1ac5f09 | 2101 | return ret; |
cc753fbe HY |
2102 | } |
2103 | ||
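/*
 * The mediated pass-through (MPT) hook table handed to GVT core at
 * module init: the hypervisor-neutral operations GVT calls, backed
 * here by KVM and VFIO.
 */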
9a3a238b | 2104 | static const struct intel_gvt_mpt kvmgt_mpt = { |
9bdb0734 | 2105 | .type = INTEL_GVT_HYPERVISOR_KVM, |
f30437c5 JS |
2106 | .host_init = kvmgt_host_init, |
2107 | .host_exit = kvmgt_host_exit, | |
2108 | .attach_vgpu = kvmgt_attach_vgpu, | |
2109 | .detach_vgpu = kvmgt_detach_vgpu, | |
2110 | .inject_msi = kvmgt_inject_msi, | |
2111 | .from_virt_to_mfn = kvmgt_virt_to_pfn, | |
f66e5ff7 CD |
2112 | .enable_page_track = kvmgt_page_track_add, |
2113 | .disable_page_track = kvmgt_page_track_remove, | |
f30437c5 JS |
2114 | .read_gpa = kvmgt_read_gpa, |
2115 | .write_gpa = kvmgt_write_gpa, | |
2116 | .gfn_to_mfn = kvmgt_gfn_to_pfn, | |
cf4ee73f CD |
2117 | .dma_map_guest_page = kvmgt_dma_map_guest_page, |
2118 | .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, | |
9f674c81 | 2119 | .dma_pin_guest_page = kvmgt_dma_pin_guest_page, |
b851adea | 2120 | .set_opregion = kvmgt_set_opregion, |
39c68e87 | 2121 | .set_edid = kvmgt_set_edid, |
e546e281 TZ |
2122 | .get_vfio_device = kvmgt_get_vfio_device, |
2123 | .put_vfio_device = kvmgt_put_vfio_device, | |
cc753fbe | 2124 | .is_valid_gfn = kvmgt_is_valid_gfn, |
f30437c5 | 2125 | }; |
f30437c5 JS |
2126 | |
2127 | static int __init kvmgt_init(void) | |
2128 | { | |
9bdb0734 ZW |
2129 | if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0) |
2130 | return -ENODEV; | |
f30437c5 JS |
2131 | return 0; |
2132 | } | |
2133 | ||
2134 | static void __exit kvmgt_exit(void) | |
2135 | { | |
9bdb0734 | 2136 | intel_gvt_unregister_hypervisor(); |
f30437c5 JS |
2137 | } |
2138 | ||
2139 | module_init(kvmgt_init); | |
2140 | module_exit(kvmgt_exit); | |
2141 | ||
2142 | MODULE_LICENSE("GPL and additional rights"); | |
2143 | MODULE_AUTHOR("Intel Corporation"); |