/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

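/*
 * Background note (a sketch of the address-space layout, inferred from the
 * helpers below, not an authoritative description): each vGPU owns one
 * slice of the host's mappable graphics memory ("aperture") and one slice
 * of the non-mappable ("hidden") space. Guest and host graphics memory
 * addresses differ only by the offset of the vGPU's slice, so translation
 * in either direction is a simple base-plus-offset rebase per region.
 */
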
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

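/*
 * A minimal usage sketch for gtt_init_entry() (illustrative only; the
 * variable names below are made up for the example):
 *
 *	u64 val = ...;	// raw 64-bit value read from a page table slot
 *	struct intel_gvt_gtt_entry e;
 *
 *	gtt_init_entry(&e, GTT_TYPE_GGTT_PTE, pdev, val);
 */
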
/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - type of the next level page table
 * - type of entry inside this level of page table
 * - type of entry with PSE set
 *
 * If the given type doesn't carry that kind of information,
 * GTT_TYPE_INVALID is returned. For example, an L4 root entry has no
 * PSE bit, so asking for its PSE type yields GTT_TYPE_INVALID; likewise
 * a PTE page table has no next level page table, so asking for its next
 * level type yields GTT_TYPE_INVALID. This is useful when traversing a
 * page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	/* We take IPS bit as 'PSE' for PTE level. */
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}

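/*
 * Illustrative lookups against gtt_type_table above (the values follow
 * directly from the table; shown here only as a reading aid):
 *
 *	get_next_pt_type(GTT_TYPE_PPGTT_PML4_ENTRY)	-> GTT_TYPE_PPGTT_PDP_PT
 *	get_entry_type(GTT_TYPE_PPGTT_PTE_PT)		-> GTT_TYPE_PPGTT_PTE_4K_ENTRY
 *	get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY)		-> GTT_TYPE_PPGTT_PTE_2M_ENTRY
 *	get_pse_type(GTT_TYPE_PPGTT_ROOT_L4_ENTRY)	-> GTT_TYPE_INVALID
 */
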
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

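/*
 * Reading note (a summary inferred from the code above, not a hardware
 * spec quotation): dev_priv->ggtt.gsm is i915's iomem mapping of the
 * global GTT, so read_pte64()/write_pte64() access real GGTT entries in
 * hardware, and ggtt_invalidate() flushes the GPU side afterwards via a
 * GFX_FLSH_CNTL_GEN6 write.
 */
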
static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)

#define GTT_SPTE_FLAG_MASK	GENMASK_ULL(62, 52)
#define GTT_SPTE_FLAG_64K_SPLITED	BIT(52) /* split 64K gtt entry */

#define GTT_64K_PTE_STRIDE 16

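/*
 * Layout note (derived from the definitions above): with a 46-bit host
 * address width, the ADDR_*_MASK macros select the page-frame bits of an
 * entry for each page size. Bits 62:52 of a shadow PTE are used by GVT as
 * software flag space; bit 52 marks a guest 64K entry that was split into
 * sixteen 4K shadow entries (hence GTT_64K_PTE_STRIDE == 64K / 4K == 16).
 */
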
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
		e->val64 &= ~ADDR_64K_MASK;
		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & _PAGE_PSE);
}

static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
{
	if (gen8_gtt_test_pse(e)) {
		switch (e->type) {
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			e->val64 &= ~_PAGE_PSE;
			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
			break;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
			e->val64 &= ~_PAGE_PSE;
			break;
		default:
			WARN_ON(1);
		}
	}
}

static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return false;

	return !!(e->val64 & GEN8_PDE_IPS_64K);
}

static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return;

	e->val64 &= ~GEN8_PDE_IPS_64K;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes the PDP root pointer registers without the present
	 * bit set, and that still works, so root pointer entries must be
	 * treated specially.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & _PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
}

static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
}

static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

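/*
 * How a gen8 graphics memory address decomposes, as implied by the
 * shift/mask pairs above (a reading aid, not a spec quotation):
 *
 *	bits 47:39	PML4 index (4-level walk only)
 *	bits 38:30	PDP index (only bits 31:30 for the 3-level l3_pdp case)
 *	bits 29:21	PDE index
 *	bits 20:12	PTE index
 *	bits 11:0	offset within the 4K page
 */
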
static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.clear_pse = gen8_gtt_clear_pse,
	.clear_ips = gen8_gtt_clear_ips,
	.test_ips = gen8_gtt_test_ips,
	.clear_64k_splited = gen8_gtt_clear_64k_splited,
	.set_64k_splited = gen8_gtt_set_64k_splited,
	.test_64k_splited = gen8_gtt_test_64k_splited,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

/* Update entry type per pse and ips bit. */
static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
		struct intel_gvt_gtt_entry *entry, bool ips)
{
	switch (entry->type) {
	case GTT_TYPE_PPGTT_PDE_ENTRY:
	case GTT_TYPE_PPGTT_PDP_ENTRY:
		if (pte_ops->test_pse(entry))
			entry->type = get_pse_type(entry->type);
		break;
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		if (ips)
			entry->type = get_pse_type(entry->type);
		break;
	default:
		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
	}

	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
}

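/*
 * For example (following the gtt_type_table mappings): an entry read back
 * as GTT_TYPE_PPGTT_PDE_ENTRY with the PSE bit set is refined to
 * GTT_TYPE_PPGTT_PTE_2M_ENTRY, and a GTT_TYPE_PPGTT_PTE_4K_ENTRY whose
 * parent PDE has IPS set becomes GTT_TYPE_PPGTT_PTE_64K_ENTRY.
 */
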
/*
 * MM helpers.
 */
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
	update_entry_type_for_real(pte_ops, entry, false);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	update_entry_type_for_real(ops, e, guest ?
				   spt->guest_page.pde_ips : false);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

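/*
 * A note on the four macros above: the guest variants pass a NULL page
 * table and guest == true, which routes gtt_get_entry64()/gtt_set_entry64()
 * through the hypervisor GPA accessors against the guest page at
 * guest_page.gfn, whereas the shadow variants operate directly on the
 * host-side kernel mapping of the shadow page (shadow_page.vaddr).
 */
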
static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);

	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

	if (spt->guest_page.gfn) {
		if (spt->guest_page.oos_page)
			detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

		intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
	}

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}

static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_ppgtt_spt *spt, *spn;
	struct radix_tree_iter iter;
	LIST_HEAD(all_spt);
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
		spt = radix_tree_deref_slot(slot);
		list_move(&spt->post_shadow_list, &all_spt);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
		ppgtt_free_spt(spt);
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	return ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
}

/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_page_track(vgpu, gfn);
	if (track && track->handler == ppgtt_write_protection_handler)
		return track->priv_data;

	return NULL;
}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

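/*
 * Allocation note (a summary of the retry loop below): if the first
 * allocation attempt fails, one shadowed PPGTT mm is reclaimed via
 * reclaim_one_ppgtt_mm() and the allocation is retried, relieving shadow
 * page memory pressure at the cost of re-shadowing later.
 */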
/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		ret = -EINVAL;
		goto err_free_spt;
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
	if (ret)
		goto err_unmap_dma;

	return spt;

err_unmap_dma:
	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
	free_spt(spt);
	return ERR_PTR(ret);
}

/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
		unsigned long gfn, bool guest_pde_ips)
{
	struct intel_vgpu_ppgtt_spt *spt;
	int ret;

	spt = ppgtt_alloc_spt(vgpu, type);
	if (IS_ERR(spt))
		return spt;

	/*
	 * Init guest_page.
	 */
	ret = intel_vgpu_register_page_track(vgpu, gfn,
			ppgtt_write_protection_handler, spt);
	if (ret) {
		ppgtt_free_spt(spt);
		return ERR_PTR(ret);
	}

	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;
	spt->guest_page.pde_ips = guest_pde_ips;

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);

	return spt;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
		if (!ppgtt_get_shadow_entry(spt, e, i))

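/*
 * Iteration note: when a page table is in 64K mode (pde_ips set), only
 * every sixteenth slot carries a meaningful entry (PTE#0, PTE#16, ...,
 * PTE#496), so the iterators above advance by GTT_64K_PTE_STRIDE instead
 * of 1 to skip the unused slots.
 */
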
static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
	atomic_inc(&spt->refcount);
}

static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
	return atomic_dec_return(&spt->refcount);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	enum intel_gvt_gtt_type cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_spt(s);
}

static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	int type;

	pfn = ops->get_pfn(entry);
	type = spt->shadow_page.type;

	/* Uninitialized spte or unshadowed spte. */
	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	if (ppgtt_put_spt(spt) > 0)
		return 0;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			ppgtt_invalidate_pte(spt, &e);
			break;
		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
			/* We don't setup 64K shadow entry so far. */
			WARN(1, "suspicious 64K gtt entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			gvt_vdbg_mm("invalidate 2M entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_spt_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}

	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_spt(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
			GAMW_ECO_ENABLE_64K_IPS_FIELD;

		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		/* 64K paging only controlled by IPS bit in PTE now. */
		return true;
	} else
		return false;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	bool ips = false;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt) {
		ppgtt_get_spt(spt);

		if (ips != spt->guest_page.pde_ips) {
			spt->guest_page.pde_ips = ips;

			gvt_dbg_mm("reshadow PDE since ips changed\n");
			clear_page(spt->shadow_page.vaddr);
			ret = ppgtt_populate_spt(spt);
			if (ret) {
				ppgtt_put_spt(spt);
				goto err;
			}
		}
	} else {
		int type = get_next_pt_type(we->type);

		if (!gtt_type_is_pt(type)) {
			ret = -EINVAL;
			goto err;
		}

		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto err;
		}

		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
		if (ret)
			goto err_free_spt;

		ret = ppgtt_populate_spt(spt);
		if (ret)
			goto err_free_spt;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;

err_free_spt:
	ppgtt_free_spt(spt);
err:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	/* Because we always split 64KB pages, clear IPS in the shadow PDE. */
	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ops->clear_ips(se);

	ops->set_pfn(se, s->shadow_page.mfn);
}

/**
 * Check whether 2MB huge GTT shadowing is possible
 * @vgpu: target vgpu
 * @entry: target pfn's gtt entry
 *
 * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
 * are not met, negative if an error is found.
 */
static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
	struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
		return 0;

	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
	if (pfn == INTEL_GVT_INVALID_ADDR)
		return -EINVAL;

	return PageTransHuge(pfn_to_page(pfn));
}

static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *sub_spt;
	struct intel_gvt_gtt_entry sub_se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	unsigned long sub_index;
	int ret;

	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);

	start_gfn = ops->get_pfn(se);

	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
	if (IS_ERR(sub_spt))
		return PTR_ERR(sub_spt);

	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
		if (ret) {
			ppgtt_invalidate_spt(spt);
			return ret;
		}
		sub_se.val64 = se->val64;

		/*
		 * Copy the PAT field from the PDE: PAT sits at bit 12
		 * (_PAGE_PAT_LARGE) in a huge-page entry but at bit 7
		 * (_PAGE_PAT) in a 4K PTE, hence the shift by 5.
		 */
		sub_se.val64 &= ~_PAGE_PAT;
		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;

		ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
	}

	/* Clear dirty field. */
	se->val64 &= ~_PAGE_DIRTY;

	ops->clear_pse(se);
	ops->clear_ips(se);
	ops->set_pfn(se, sub_spt->shadow_page.mfn);
	ppgtt_set_shadow_entry(spt, se, index);
	return 0;
}

static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = *se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	int i, ret;

	gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);

	GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);

	start_gfn = ops->get_pfn(se);

	entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
	ops->set_64k_splited(&entry);

	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
					start_gfn + i, PAGE_SIZE, &dma_addr);
		if (ret)
			return ret;

		ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(spt, &entry, index + i);
	}
	return 0;
}

static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn, page_size = PAGE_SIZE;
	dma_addr_t dma_addr;
	int ret;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
		gvt_vdbg_mm("shadow 64K gtt entry\n");
		/*
		 * The layout of 64K pages is special: the page size is
		 * controlled by the upper PDE. To keep it simple, we always
		 * split a 64K page into smaller 4K pages in the shadow PT.
		 */
		return split_64KB_gtt_entry(vgpu, spt, index, &se);
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		gvt_vdbg_mm("shadow 2M gtt entry\n");
		ret = is_2MB_gtt_possible(vgpu, ge);
		if (ret == 0)
			return split_2MB_gtt_entry(vgpu, spt, index, &se);
		else if (ret < 0)
			return ret;
		page_size = I915_GTT_PAGE_SIZE_2M;
		break;
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	}

	/* direct shadow */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
						      &dma_addr);
	if (ret)
		return -ENXIO;

	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
			}

			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
			       spt->shadow_page.type, se->val64, index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) ==
	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_spt(s);
		if (ret)
			goto fail;
	} else {
		/* We don't setup 64K shadow entry so far. */
		WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
		     "suspicious 64K entry\n");
		ppgtt_invalidate_pte(spt, se);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
			       we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}

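/*
 * Out-of-sync (OOS) page handling starts here. A rough summary, inferred
 * from the functions below: a heavily written PTE page can be taken "out
 * of sync" by dropping its write protection and keeping a snapshot of its
 * contents in an oos_page; before the guest's next workload is submitted,
 * sync_oos_page() diffs the snapshot against the current guest page and
 * re-shadows only the entries that actually changed.
 */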
static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			 spt, spt->guest_page.type);

	old.type = new.type = get_entry_type(spt->guest_page.type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				spt, spt->guest_page.type,
				new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	spt->guest_page.write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			 spt, spt->guest_page.type);

	spt->guest_page.write_cnt = 0;
	spt->guest_page.oos_page = NULL;
	oos_page->spt = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->spt = spt;
	spt->guest_page.oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
			 spt, spt->guest_page.type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
	if (ret)
		return ret;

	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_del_init(&oos_page->vm_list);
	return sync_oos_page(spt->vgpu, oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
				struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
		ret = detach_oos_page(spt->vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
				struct intel_vgpu_oos_page, list);
	return attach_oos_page(oos_page, spt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
}

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-sync shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The heart of the PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry old_se;
	int new_present;
	int i, ret;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one; this
	 * guarantees the ppgtt table stays valid during the window
	 * between the two operations.
	 */
	ppgtt_get_shadow_entry(spt, &old_se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(spt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		/* For 64KB split entries, we need to clear them all. */
		if (ops->test_64k_splited(&old_se) &&
		    !(index % GTT_64K_PTE_STRIDE)) {
			gvt_vdbg_mm("remove split 64K shadow entries\n");
			for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
				ops->clear_64k_splited(&old_se);
				ops->set_pfn(&old_se,
					vgpu->gtt.scratch_pt[type].page_mfn);
				ppgtt_set_shadow_entry(spt, &old_se, index + i);
			}
		} else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
			   old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
			ops->clear_pse(&old_se);
			ops->set_pfn(&old_se,
				     vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &old_se, index);
		} else {
			ops->set_pfn(&old_se,
				     vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &old_se, index);
		}
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(spt->guest_page.type)
		&& spt->guest_page.write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadows for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(spt,
							&ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}

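/*
 * A note on partial writes (summarizing the handler below): a guest may
 * update one 8-byte PTE with two 4-byte MMIO writes. When a write smaller
 * than the entry size arrives, the old shadow entry is torn down, the slot
 * is pointed at the scratch page, and the index is queued on the
 * post-shadow list; the entry is only re-shadowed once the full value is
 * known, when intel_vgpu_flush_post_shadow() runs before submission.
 */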
7d1e5cdf 1691static int ppgtt_handle_guest_write_page_table_bytes(
44b46733 1692 struct intel_vgpu_ppgtt_spt *spt,
2707e444
ZW
1693 u64 pa, void *p_data, int bytes)
1694{
2707e444
ZW
1695 struct intel_vgpu *vgpu = spt->vgpu;
1696 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1697 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
6b3816d6 1698 struct intel_gvt_gtt_entry we, se;
2707e444
ZW
1699 unsigned long index;
1700 int ret;
1701
1702 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1703
1704 ppgtt_get_guest_entry(spt, &we, index);
2707e444 1705
eb3a3530
CD
1706 /*
1707 * For page table which has 64K gtt entry, only PTE#0, PTE#16,
1708 * PTE#32, ... PTE#496 are used. Unused PTEs update should be
1709 * ignored.
1710 */
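	/*
	 * Worked example (illustrative values, not from a real trace): with
	 * an 8-byte entry size, a write to pa 0x1088 yields index
	 * (0x88 >> 3) = 17; since 17 % GTT_64K_PTE_STRIDE != 0, a 64K-entry
	 * table treats PTE#17 as unused and the write is dropped below.
	 */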
	if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
	    (index % GTT_64K_PTE_STRIDE)) {
		gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
			    index);
		return 0;
	}

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	spt->guest_page.write_cnt++;

	if (spt->guest_page.oos_page)
		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(spt)) {
		if (!spt->guest_page.oos_page)
			ppgtt_allocate_oos_page(spt);

		ret = ppgtt_set_guest_page_oos(spt);
		if (ret < 0)
			return ret;
	}
	return 0;
}
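
/*
 * Out-of-sync sketch (a simplified view of the flow above): the first
 * tracked write to a PTE page goes through the post-shadow path; once
 * write_cnt reaches 2 (see can_do_out_of_sync()), the page gets an
 * oos_page mirror and is taken out of sync, with guest writes recorded
 * in oos_page->mem until the next sync point reconciles the shadow table.
 */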

static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int index;

	if (!mm->ppgtt_mm.shadowed)
		return;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
		ppgtt_get_shadow_root_entry(mm, &se, index);

		if (!ops->test_present(&se))
			continue;

		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "destroy root pointer",
				       NULL, se.type, se.val64, index);
	}

	mm->ppgtt_mm.shadowed = false;
}

static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int index, ret;

	if (mm->ppgtt_mm.shadowed)
		return 0;

	mm->ppgtt_mm.shadowed = true;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
		ppgtt_get_guest_root_entry(mm, &ge, index);

		if (!ops->test_present(&ge))
			continue;

		trace_spt_guest_change(vgpu->id, __func__, NULL,
				       ge.type, ge.val64, index);

		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "populate root pointer",
				       NULL, se.type, se.val64, index);
	}

	return 0;
fail:
	invalidate_ppgtt_mm(mm);
	return ret;
}

static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return NULL;

	mm->vgpu = vgpu;
	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);

	return mm;
}

static void vgpu_free_mm(struct intel_vgpu_mm *mm)
{
	kfree(mm);
}

/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps.
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * Pointer to the new mm object on success, ERR_PTR-encoded error code
 * if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_PPGTT;

	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
	mm->ppgtt_mm.root_entry_type = root_entry_type;

	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);

	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
	else
		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
		       sizeof(mm->ppgtt_mm.guest_pdps));

	ret = shadow_ppgtt_mm(mm);
	if (ret) {
		gvt_vgpu_err("failed to shadow ppgtt mm\n");
		vgpu_free_mm(mm);
		return ERR_PTR(ret);
	}

	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);

	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);

	return mm;
}

static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;
	unsigned long nr_entries;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_GGTT;

	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
	mm->ggtt_mm.virtual_ggtt =
		vzalloc(array_size(nr_entries,
				   vgpu->gvt->device_info.gtt_entry_size));
	if (!mm->ggtt_mm.virtual_ggtt) {
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	return mm;
}

/**
 * _intel_vgpu_mm_release - destroy an mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy an mm object for a vGPU.
 *
 */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);

	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
		gvt_err("vgpu mm pin count bug detected\n");

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		list_del(&mm->ppgtt_mm.list);
		list_del(&mm->ppgtt_mm.lru_list);
		invalidate_ppgtt_mm(mm);
	} else {
		vfree(mm->ggtt_mm.virtual_ggtt);
	}

	vgpu_free_mm(mm);
}
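
/*
 * Reference-counting note (illustrative; relies on the intel_vgpu_mm_get()
 * and intel_vgpu_mm_put() wrappers used elsewhere in this file):
 * _intel_vgpu_mm_release() is only invoked as the kref destructor, never
 * directly. A typical lifetime might look like:
 *
 *	mm = intel_vgpu_create_ppgtt_mm(vgpu, type, pdps);	// ref == 1
 *	intel_vgpu_mm_get(mm);					// ref == 2
 *	...
 *	intel_vgpu_mm_put(mm);					// ref == 1
 *	intel_vgpu_destroy_mm(mm);				// ref == 0
 */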

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user no longer needs a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	atomic_dec_if_positive(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: target vgpu mm
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	atomic_inc(&mm->pincount);

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		ret = shadow_ppgtt_mm(mm);
		if (ret)
			return ret;

		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
		list_move_tail(&mm->ppgtt_mm.lru_list,
			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
	}

	return 0;
}
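
/*
 * Pin/unpin sketch (illustrative): a workload dispatcher would bracket
 * hardware access with the pair below; a non-zero pincount keeps
 * reclaim_one_ppgtt_mm() from invalidating the shadow in between:
 *
 *	ret = intel_vgpu_pin_mm(mm);
 *	if (ret)
 *		return ret;
 *	...hardware walks the shadow page tables...
 *	intel_vgpu_unpin_mm(mm);
 */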

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	mutex_lock(&gvt->gtt.ppgtt_mm_lock);

	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);

		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->ppgtt_mm.lru_list);
		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
		invalidate_ppgtt_mm(mm);
		return 1;
	}
	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
	return 0;
}

/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);
	return 0;
}

/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. Could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a
 * specific graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, levels = 0;
	int ret;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
		   mm->type != INTEL_GVT_MM_PPGTT);

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
			gma_ops->gma_to_ggtt_pte_index(gma));

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
			+ (gma & ~I915_GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
	} else {
		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e, 0);

			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
			gma_index[2] = gma_ops->gma_to_pde_index(gma);
			gma_index[3] = gma_ops->gma_to_pte_index(gma);
			levels = 4;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e,
					gma_ops->gma_to_l3_pdp_index(gma));

			gma_index[0] = gma_ops->gma_to_pde_index(gma);
			gma_index[1] = gma_ops->gma_to_pte_index(gma);
			levels = 2;
			break;
		default:
			GEM_BUG_ON(1);
		}

		/* walk the shadow page table and get gpa from guest entry */
		for (i = 0; i < levels; i++) {
			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == levels - 1));
			if (ret)
				goto err;

			if (!pte_ops->test_present(&e)) {
				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
				goto err;
			}
		}

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
					(gma & ~I915_GTT_PAGE_MASK);
		trace_gma_translate(vgpu->id, "ppgtt", 0,
				    mm->ppgtt_mm.root_entry_type, gma, gpa);
	}

	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
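
/*
 * Worked example for the 4-level walk (assuming the standard gen8 9-bit
 * index fields): for gma = 0x8040803000,
 *
 *	pml4 index = (gma >> 39) & 0x1ff = 1
 *	pdp  index = (gma >> 30) & 0x1ff = 1
 *	pde  index = (gma >> 21) & 0x1ff = 4
 *	pte  index = (gma >> 12) & 0x1ff = 3
 *
 * and the final gpa is the resolved pfn << I915_GTT_PAGE_SHIFT plus the
 * page offset (gma & ~I915_GTT_PAGE_MASK), which here is 0.
 */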

static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}

/**
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}

static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	pfn = pte_ops->get_pfn(entry);
	if (pfn != vgpu->gvt->gtt.scratch_mfn)
		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
						pfn << PAGE_SHIFT);
}

static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma, gfn;
	struct intel_gvt_gtt_entry e, m;
	dma_addr_t dma_addr;
	int ret;
	struct intel_gvt_partial_pte *partial_pte, *pos, *n;
	bool partial_update = false;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	e.type = GTT_TYPE_GGTT_PTE;
	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	/* If the ggtt entry size is 8 bytes and the write is split into two
	 * 4-byte writes, save the first 4 bytes in a list and update the
	 * virtual PTE. Only update the shadow PTE when the second 4 bytes
	 * arrive.
	 */
	if (bytes < info->gtt_entry_size) {
		bool found = false;

		list_for_each_entry_safe(pos, n,
					 &ggtt_mm->ggtt_mm.partial_pte_list, list) {
			if (g_gtt_index == pos->offset >>
					info->gtt_entry_size_shift) {
				if (off != pos->offset) {
					/* the second partial part */
					int last_off = pos->offset &
						(info->gtt_entry_size - 1);

					memcpy((void *)&e.val64 + last_off,
						(void *)&pos->data + last_off,
						bytes);

					list_del(&pos->list);
					kfree(pos);
					found = true;
					break;
				}

				/* update of the first partial part */
				pos->data = e.val64;
				ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
				return 0;
			}
		}

		if (!found) {
			/* the first partial part */
			partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
			if (!partial_pte)
				return -ENOMEM;
			partial_pte->offset = off;
			partial_pte->data = e.val64;
			list_add_tail(&partial_pte->list,
				      &ggtt_mm->ggtt_mm.partial_pte_list);
			partial_update = true;
		}
	}

	if (!partial_update && (ops->test_present(&e))) {
		gfn = ops->get_pfn(&e);
		m = e;

		/* one PTE update may be issued in multiple writes and the
		 * first write may not construct a valid gfn
		 */
		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
			goto out;
		}

		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
							      PAGE_SIZE, &dma_addr);
		if (ret) {
			gvt_vgpu_err("fail to populate guest ggtt entry\n");
			/* The guest driver may read/write the entry during a
			 * partial update; the p2m mapping can fail in that
			 * situation, so set the shadow entry to point to a
			 * scratch page.
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		} else
			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
	} else {
		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		ops->clear_present(&m);
	}

out:
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);

	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
	ggtt_invalidate_pte(vgpu, &e);

	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
	ggtt_invalidate(gvt->dev_priv);
	return 0;
}
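
/*
 * Partial-write example (illustrative guest behavior): a 32-bit guest
 * driver updating a 64-bit GGTT PTE with two 4-byte MMIO stores
 *
 *	write 4 bytes at off 0x10000;	// queued on partial_pte_list
 *	write 4 bytes at off 0x10004;	// halves merged, shadow updated
 *
 * only reaches the shadow table and hardware after the second store,
 * once both halves of entry 0x10000 >> 3 = 0x2000 have been combined.
 */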

/**
 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}

static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by filling the scratch pt with entries which
	 * point to the next level scratch pt or scratch page. The
	 * scratch_pt[type] indicates the scratch pt/scratch page used by
	 * the 'type' pt.
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
	 * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
	 * is of type GTT_TYPE_PPGTT_PTE_PT and is fully filled with the
	 * scratch page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters like present/writeable/cache type
		 * are set the same as in i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}
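
/*
 * Resulting scratch hierarchy, sketched (one instance per vGPU): each
 * scratch table is filled with entries pointing at the scratch table one
 * level below it, while the PTE-level scratch table is left zeroed, so a
 * stray walk terminates as not-present:
 *
 *	scratch_pt[PML4_PT] -> scratch_pt[PDP_PT] -> scratch_pt[PDE_PT]
 *		-> scratch_pt[PTE_PT] (all-zero entries)
 */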

static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;

	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);

	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
	if (IS_ERR(gtt->ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(gtt->ggtt_mm);
	}

	intel_vgpu_reset_ggtt(vgpu, false);

	INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);

	return create_scratch_page_tree(vgpu);
}

static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		intel_vgpu_destroy_mm(mm);
	}

	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
		gvt_err("vgpu ppgtt mm is not fully destroyed\n");

	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
		gvt_err("Why do we still have spt not freed?\n");
		ppgtt_free_all_spt(vgpu);
	}
}

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_gvt_partial_pte *pos, *next;

	list_for_each_entry_safe(pos, next,
				 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
				 list) {
		gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
			   pos->offset, pos->data);
		kfree(pos);
	}
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 *
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_destroy_ggtt_mm(vgpu);
	release_scratch_page_tree(vgpu);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		free_page((unsigned long)oos_page->mem);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}
		oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
		if (!oos_page->mem) {
			ret = -ENOMEM;
			kfree(oos_page);
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}

/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: pdp root array
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos;

	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
				return mm;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
				    sizeof(mm->ppgtt_mm.guest_pdps)))
				return mm;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
	return NULL;
}

/**
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object from a guest.
 *
 * Returns:
 * pointer to mm object on success, ERR_PTR-encoded error code if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm) {
		intel_vgpu_mm_get(mm);
	} else {
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
		if (IS_ERR(mm))
			gvt_vgpu_err("fail to create mm\n");
	}
	return mm;
}
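
/*
 * Typical pairing (illustrative): an mm obtained here on a guest's PDP
 * update notification is dropped again on the matching destroy
 * notification, keyed by the same pdps:
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
 *	...
 *	ret = intel_vgpu_put_ppgtt_mm(vgpu, pdps);
 */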

/**
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object from a guest and destroy it.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_vgpu_mm_put(mm);
	return 0;
}

/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_init(&gvt->gtt.ppgtt_mm_lock);
	return 0;
}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}

/**
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * This function is called to invalidate all PPGTT instances of a vGPU.
 *
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		if (mm->type == INTEL_GVT_MM_PPGTT) {
			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			list_del_init(&mm->ppgtt_mm.lru_list);
			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			if (mm->ppgtt_mm.shadowed)
				invalidate_ppgtt_mm(mm);
		}
	}
}

/**
 * intel_vgpu_reset_ggtt - reset the GGTT entry
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry old_entry;
	u32 index;
	u32 num_entries;

	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	ggtt_invalidate(dev_priv);
}

/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch page.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
}