drivers/gpu/drm/i915/gt/intel_ggtt.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <asm/set_memory.h>
#include <asm/smp.h>
#include <linux/types.h>
#include <linux/stop_machine.h>

#include <drm/i915_drm.h>
#include <drm/intel-gtt.h>

#include "gem/i915_gem_lmem.h"

#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"
#include "gen8_ppgtt.h"

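/*
 * The GGTT PTEs are expected to be retained across suspend only on gen8+
 * integrated parts (no local memory); elsewhere they must be cleared on
 * suspend and rewritten on resume.
 */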
static inline bool suspend_retains_ptes(struct i915_address_space *vm)
{
        return GRAPHICS_VER(vm->i915) >= 8 &&
                !HAS_LMEM(vm->i915) &&
                vm->is_ggtt;
}

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
                                   unsigned long color,
                                   u64 *start,
                                   u64 *end)
{
        if (i915_node_color_differs(node, color))
                *start += I915_GTT_PAGE_SIZE;

        /*
         * Also leave a space between the unallocated reserved node after the
         * GTT and any objects within the GTT, i.e. we use the color adjustment
         * to insert a guard page to prevent prefetches crossing over the
         * GTT boundary.
         */
        node = list_next_entry(node, node_list);
        if (node->color != color)
                *end -= I915_GTT_PAGE_SIZE;
}

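/*
 * Common GGTT initialisation: set up the address space, the WC iomap of the
 * mappable aperture and the fence registers.
 */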
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
        struct drm_i915_private *i915 = ggtt->vm.i915;

        i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

        ggtt->vm.is_ggtt = true;

        /* Only VLV supports read-only GGTT mappings */
        ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

        if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
                ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

        if (ggtt->mappable_end) {
                if (!io_mapping_init_wc(&ggtt->iomap,
                                        ggtt->gmadr.start,
                                        ggtt->mappable_end)) {
                        ggtt->vm.cleanup(&ggtt->vm);
                        return -EIO;
                }

                ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
                                              ggtt->mappable_end);
        }

        intel_ggtt_init_fences(ggtt);

        return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
        int ret;

        /*
         * Note that we use page colouring to enforce a guard page at the
         * end of the address space. This is required as the CS may prefetch
         * beyond the end of the batch buffer, across the page boundary,
         * and beyond the end of the GTT if we do not provide a guard.
         */
        ret = ggtt_init_hw(to_gt(i915)->ggtt);
        if (ret)
                return ret;

        return 0;
}

/*
 * Return the value of the last GGTT pte cast to an u64, if
 * the system is supposed to retain ptes across resume. 0 otherwise.
 */
static u64 read_last_pte(struct i915_address_space *vm)
{
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        gen8_pte_t __iomem *ptep;

        if (!suspend_retains_ptes(vm))
                return 0;

        GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8);
        ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1);
        return readq(ptep);
}

/**
 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
 * @vm: The VM to suspend the mappings for
 *
 * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
 * DPT page table.
 */
void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
        struct i915_vma *vma, *vn;
        int save_skip_rewrite;

        drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

retry:
        i915_gem_drain_freed_objects(vm->i915);

        mutex_lock(&vm->mutex);

        /*
         * Skip rewriting PTE on VMA unbind.
         * FIXME: Use an argument to i915_vma_unbind() instead?
         */
        save_skip_rewrite = vm->skip_pte_rewrite;
        vm->skip_pte_rewrite = true;

        list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
                struct drm_i915_gem_object *obj = vma->obj;

                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

                if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
                        continue;

                /* unlikely to race when GPU is idle, so no worry about slowpath.. */
                if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
                        /*
                         * No dead objects should appear here, GPU should be
                         * completely idle, and userspace suspended
                         */
                        i915_gem_object_get(obj);

                        mutex_unlock(&vm->mutex);

                        i915_gem_object_lock(obj, NULL);
                        GEM_WARN_ON(i915_vma_unbind(vma));
                        i915_gem_object_unlock(obj);
                        i915_gem_object_put(obj);

                        vm->skip_pte_rewrite = save_skip_rewrite;
                        goto retry;
                }

                if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
                        i915_vma_wait_for_bind(vma);

                        __i915_vma_evict(vma, false);
                        drm_mm_remove_node(&vma->node);
                }

                i915_gem_object_unlock(obj);
        }

        if (!suspend_retains_ptes(vm))
                vm->clear_range(vm, 0, vm->total);
        else
                i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);

        vm->skip_pte_rewrite = save_skip_rewrite;

        mutex_unlock(&vm->mutex);
}

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
        i915_ggtt_suspend_vm(&ggtt->vm);
        ggtt->invalidate(ggtt);

        intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

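/*
 * Make GGTT PTE updates visible to the GPU by poking GFX_FLSH_CNTL under the
 * uncore lock; the read back acts as a posting read.
 */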
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;

        spin_lock_irq(&uncore->lock);
        intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
        spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;

        /*
         * Note that as an uncached mmio write, this will flush the
         * WCB of the writes into the GGTT before it triggers the invalidate.
         */
        intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;
        struct drm_i915_private *i915 = ggtt->vm.i915;

        gen8_ggtt_invalidate(ggtt);

        if (GRAPHICS_VER(i915) >= 12)
                intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
                                      GEN12_GUC_TLB_INV_CR_INVALIDATE);
        else
                intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

u64 gen8_ggtt_pte_encode(dma_addr_t addr,
                         enum i915_cache_level level,
                         u32 flags)
{
        gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;

        if (flags & PTE_LM)
                pte |= GEN12_GGTT_PTE_LM;

        return pte;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
        writeq(pte, addr);
}

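/* Write a single PTE into the GGTT and flush it through to the GPU. */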
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
                                  dma_addr_t addr,
                                  u64 offset,
                                  enum i915_cache_level level,
                                  u32 flags)
{
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        gen8_pte_t __iomem *pte =
                (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

        gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

        ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct i915_vma_resource *vma_res,
                                     enum i915_cache_level level,
                                     u32 flags)
{
        const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        gen8_pte_t __iomem *gte;
        gen8_pte_t __iomem *end;
        struct sgt_iter iter;
        dma_addr_t addr;

        /*
         * Note that we ignore PTE_READ_ONLY here. The caller must be careful
         * not to allow the user to override access to a read only page.
         */

        gte = (gen8_pte_t __iomem *)ggtt->gsm;
        gte += vma_res->start / I915_GTT_PAGE_SIZE;
        end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;

        for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
                gen8_set_pte(gte++, pte_encode | addr);
        GEM_BUG_ON(gte > end);

        /* Fill the allocated but "unused" space beyond the end of the buffer */
        while (gte < end)
                gen8_set_pte(gte++, vm->scratch[0]->encode);

        /*
         * We want to flush the TLBs only after we're certain all the PTE
         * updates have finished.
         */
        ggtt->invalidate(ggtt);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
                                  dma_addr_t addr,
                                  u64 offset,
                                  enum i915_cache_level level,
                                  u32 flags)
{
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        gen6_pte_t __iomem *pte =
                (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

        iowrite32(vm->pte_encode(addr, level, flags), pte);

        ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct i915_vma_resource *vma_res,
                                     enum i915_cache_level level,
                                     u32 flags)
{
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        gen6_pte_t __iomem *gte;
        gen6_pte_t __iomem *end;
        struct sgt_iter iter;
        dma_addr_t addr;

        gte = (gen6_pte_t __iomem *)ggtt->gsm;
        gte += vma_res->start / I915_GTT_PAGE_SIZE;
        end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;

        for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
                iowrite32(vm->pte_encode(addr, level, flags), gte++);
        GEM_BUG_ON(gte > end);

        /* Fill the allocated but "unused" space beyond the end of the buffer */
        while (gte < end)
                iowrite32(vm->scratch[0]->encode, gte++);

        /*
         * We want to flush the TLBs only after we're certain all the PTE
         * updates have finished.
         */
        ggtt->invalidate(ggtt);
}

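/*
 * Clearing ranges is skipped entirely where leaving stale PTEs behind is
 * considered harmless; see the per-platform probe functions below for when
 * a real clear_range implementation is installed instead.
 */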
static void nop_clear_range(struct i915_address_space *vm,
                            u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                                  u64 start, u64 length)
{
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
        unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
        const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
        gen8_pte_t __iomem *gtt_base =
                (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
        const int max_entries = ggtt_total_entries(ggtt) - first_entry;
        int i;

        if (WARN(num_entries > max_entries,
                 "First entry = %d; Num entries = %d (max=%d)\n",
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;

        for (i = 0; i < num_entries; i++)
                gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
        /*
         * Make sure the internal GAM fifo has been cleared of all GTT
         * writes before exiting stop_machine(). This guarantees that
         * any aperture accesses waiting to start in another process
         * cannot back up behind the GTT writes causing a hang.
         * The register can be any arbitrary GAM register.
         */
        intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}

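/*
 * On platforms needing the VT-d serialization workaround, GGTT updates are
 * funnelled through stop_machine(); these structs marshal the arguments into
 * the stop_machine() callbacks below.
 */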
struct insert_page {
        struct i915_address_space *vm;
        dma_addr_t addr;
        u64 offset;
        enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
        struct insert_page *arg = _arg;

        gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
        bxt_vtd_ggtt_wa(arg->vm);

        return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
                                          dma_addr_t addr,
                                          u64 offset,
                                          enum i915_cache_level level,
                                          u32 unused)
{
        struct insert_page arg = { vm, addr, offset, level };

        stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
        struct i915_address_space *vm;
        struct i915_vma_resource *vma_res;
        enum i915_cache_level level;
        u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
        struct insert_entries *arg = _arg;

        gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
        bxt_vtd_ggtt_wa(arg->vm);

        return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
                                             struct i915_vma_resource *vma_res,
                                             enum i915_cache_level level,
                                             u32 flags)
{
        struct insert_entries arg = { vm, vma_res, level, flags };

        stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  u64 start, u64 length)
{
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
        unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
        gen6_pte_t scratch_pte, __iomem *gtt_base =
                (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
        const int max_entries = ggtt_total_entries(ggtt) - first_entry;
        int i;

        if (WARN(num_entries > max_entries,
                 "First entry = %d; Num entries = %d (max=%d)\n",
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;

        scratch_pte = vm->scratch[0]->encode;
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
}

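/*
 * Bind a vma into the GGTT, translating the read-only and local-memory
 * attributes of the backing store into PTE flags.
 */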
void intel_ggtt_bind_vma(struct i915_address_space *vm,
                         struct i915_vm_pt_stash *stash,
                         struct i915_vma_resource *vma_res,
                         enum i915_cache_level cache_level,
                         u32 flags)
{
        u32 pte_flags;

        if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
                return;

        vma_res->bound_flags |= flags;

        /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
        pte_flags = 0;
        if (vma_res->bi.readonly)
                pte_flags |= PTE_READ_ONLY;
        if (vma_res->bi.lmem)
                pte_flags |= PTE_LM;

        vm->insert_entries(vm, vma_res, cache_level, pte_flags);
        vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}

void intel_ggtt_unbind_vma(struct i915_address_space *vm,
                           struct i915_vma_resource *vma_res)
{
        vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}

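/*
 * GuC cannot address GGTT offsets above GUC_GGTT_TOP, so reserve everything
 * above that limit to keep ordinary allocations out of the inaccessible
 * range.
 */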
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
        u64 size;
        int ret;

        if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
                return 0;

        GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
        size = ggtt->vm.total - GUC_GGTT_TOP;

        ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
                                   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
                                   PIN_NOEVICT);
        if (ret)
                drm_dbg(&ggtt->vm.i915->drm,
                        "Failed to reserve top of GGTT for GuC\n");

        return ret;
}

static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
        if (drm_mm_node_allocated(&ggtt->uc_fw))
                drm_mm_remove_node(&ggtt->uc_fw);
}

static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
        ggtt_release_guc_top(ggtt);
        if (drm_mm_node_allocated(&ggtt->error_capture))
                drm_mm_remove_node(&ggtt->error_capture);
        mutex_destroy(&ggtt->error_mutex);
}

static int init_ggtt(struct i915_ggtt *ggtt)
{
        /*
         * Let GEM Manage all of the aperture.
         *
         * However, leave one page at the end still bound to the scratch page.
         * There are a number of places where the hardware apparently prefetches
         * past the end of the object, and we've seen multiple hangs with the
         * GPU head pointer stuck in a batchbuffer bound at the last page of the
         * aperture.  One page should be enough to keep any prefetching inside
         * of the aperture.
         */
        unsigned long hole_start, hole_end;
        struct drm_mm_node *entry;
        int ret;

        ggtt->pte_lost = true;

        /*
         * GuC requires all resources that we're sharing with it to be placed in
         * non-WOPCM memory. If GuC is not present or not in use we still need a
         * small bias as ring wraparound at offset 0 sometimes hangs. No idea
         * why.
         */
        ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
                               intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

        ret = intel_vgt_balloon(ggtt);
        if (ret)
                return ret;

        mutex_init(&ggtt->error_mutex);
        if (ggtt->mappable_end) {
                /*
                 * Reserve a mappable slot for our lockless error capture.
                 *
                 * We strongly prefer taking address 0x0 in order to protect
                 * other critical buffers against accidental overwrites,
                 * as writing to address 0 is a very common mistake.
                 *
                 * Since 0 may already be in use by the system (e.g. the BIOS
                 * framebuffer), we let the reservation fail quietly and hope
                 * 0 remains reserved always.
                 *
                 * If we fail to reserve 0, and then fail to find any space
                 * for an error-capture, remain silent. We can afford not
                 * to reserve an error_capture node as we have fallback
                 * paths, and we trust that 0 will remain reserved. However,
                 * the only likely reason for failure to insert is a driver
                 * bug, which we expect to cause other failures...
                 */
                ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
                ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
                if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
                        drm_mm_insert_node_in_range(&ggtt->vm.mm,
                                                    &ggtt->error_capture,
                                                    ggtt->error_capture.size, 0,
                                                    ggtt->error_capture.color,
                                                    0, ggtt->mappable_end,
                                                    DRM_MM_INSERT_LOW);
        }
        if (drm_mm_node_allocated(&ggtt->error_capture))
                drm_dbg(&ggtt->vm.i915->drm,
                        "Reserved GGTT:[%llx, %llx] for use by error capture\n",
                        ggtt->error_capture.start,
                        ggtt->error_capture.start + ggtt->error_capture.size);

        /*
         * The upper portion of the GuC address space has a sizeable hole
         * (several MB) that is inaccessible by GuC. Reserve this range within
         * GGTT as it can comfortably hold GuC/HuC firmware images.
         */
        ret = ggtt_reserve_guc_top(ggtt);
        if (ret)
                goto err;

        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
                drm_dbg(&ggtt->vm.i915->drm,
                        "clearing unused GTT space: [%lx, %lx]\n",
                        hole_start, hole_end);
                ggtt->vm.clear_range(&ggtt->vm, hole_start,
                                     hole_end - hole_start);
        }

        /* And finally clear the reserved guard page */
        ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

        return 0;

err:
        cleanup_init_ggtt(ggtt);
        return ret;
}

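/*
 * With an aliasing PPGTT, a vma may be bound into the global GTT, the
 * aliasing ppgtt, or both, depending on the requested bind flags.
 */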
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
                                  struct i915_vm_pt_stash *stash,
                                  struct i915_vma_resource *vma_res,
                                  enum i915_cache_level cache_level,
                                  u32 flags)
{
        u32 pte_flags;

        /* Currently applicable only to VLV */
        pte_flags = 0;
        if (vma_res->bi.readonly)
                pte_flags |= PTE_READ_ONLY;

        if (flags & I915_VMA_LOCAL_BIND)
                ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
                               stash, vma_res, cache_level, flags);

        if (flags & I915_VMA_GLOBAL_BIND)
                vm->insert_entries(vm, vma_res, cache_level, pte_flags);

        vma_res->bound_flags |= flags;
}

static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
                                    struct i915_vma_resource *vma_res)
{
        if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
                vm->clear_range(vm, vma_res->start, vma_res->vma_size);

        if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
                ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
}

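/*
 * Create the single aliasing ppgtt shadowing the whole of the global GTT and
 * switch the GGTT vma ops over to the aliasing variants.
 */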
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
        struct i915_vm_pt_stash stash = {};
        struct i915_ppgtt *ppgtt;
        int err;

        ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
                err = -ENODEV;
                goto err_ppgtt;
        }

        err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
        if (err)
                goto err_ppgtt;

        i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
        err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
        i915_gem_object_unlock(ppgtt->vm.scratch[0]);
        if (err)
                goto err_stash;

        /*
         * Note we only pre-allocate as far as the end of the global
         * GTT. On 48b / 4-level page-tables, the difference is very,
         * very significant! We have to preallocate as GVT/vgpu does
         * not like the page directory disappearing.
         */
        ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

        ggtt->alias = ppgtt;
        ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

        GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
        ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

        GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
        ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

        i915_vm_free_pt_stash(&ppgtt->vm, &stash);
        return 0;

err_stash:
        i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
        i915_vm_put(&ppgtt->vm);
        return err;
}

static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
        struct i915_ppgtt *ppgtt;

        ppgtt = fetch_and_zero(&ggtt->alias);
        if (!ppgtt)
                return;

        i915_vm_put(&ppgtt->vm);

        ggtt->vm.vma_ops.bind_vma   = intel_ggtt_bind_vma;
        ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
        int ret;

        ret = init_ggtt(to_gt(i915)->ggtt);
        if (ret)
                return ret;

        if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
                ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
                if (ret)
                        cleanup_init_ggtt(to_gt(i915)->ggtt);
        }

        return 0;
}

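/*
 * Tear down the GGTT: unbind any remaining vmas, drop the reserved nodes,
 * release the ballooned ranges and unmap the page table.
 */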
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
        struct i915_vma *vma, *vn;

        flush_workqueue(ggtt->vm.i915->wq);
        i915_gem_drain_freed_objects(ggtt->vm.i915);

        mutex_lock(&ggtt->vm.mutex);

        ggtt->vm.skip_pte_rewrite = true;

        list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
                struct drm_i915_gem_object *obj = vma->obj;
                bool trylock;

                trylock = i915_gem_object_trylock(obj, NULL);
                WARN_ON(!trylock);

                WARN_ON(__i915_vma_unbind(vma));
                if (trylock)
                        i915_gem_object_unlock(obj);
        }

        if (drm_mm_node_allocated(&ggtt->error_capture))
                drm_mm_remove_node(&ggtt->error_capture);
        mutex_destroy(&ggtt->error_mutex);

        ggtt_release_guc_top(ggtt);
        intel_vgt_deballoon(ggtt);

        ggtt->vm.cleanup(&ggtt->vm);

        mutex_unlock(&ggtt->vm.mutex);
        i915_address_space_fini(&ggtt->vm);

        arch_phys_wc_del(ggtt->mtrr);

        if (ggtt->iomap.size)
                io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

        fini_aliasing_ppgtt(ggtt);

        intel_ggtt_fini_fences(ggtt);
        ggtt_cleanup_hw(ggtt);
}

/**
 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
 * all free objects have been drained.
 * @i915: i915 device
 */
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
{
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

        GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
        dma_resv_fini(&ggtt->vm._resv);
}

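/*
 * The helpers below decode the GGTT size advertised in the GMCH control
 * register for the various platform generations.
 */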
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
        return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
        bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
        bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
        if (bdw_gmch_ctl)
                bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
        /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
        if (bdw_gmch_ctl > 4)
                bdw_gmch_ctl = 4;
#endif

        return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
        gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
        gmch_ctrl &= SNB_GMCH_GGMS_MASK;

        if (gmch_ctrl)
                return 1 << (20 + gmch_ctrl);

        return 0;
}

static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
{
        /*
         * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
         * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
         */
        GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
        return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
}

static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
{
        return gen6_gttmmadr_size(i915) / 2;
}

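/*
 * Map the GGTT page table (GSM) through the GTTMMADR BAR and set up the
 * scratch page that unused PTEs will point at.
 */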
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
        struct drm_i915_private *i915 = ggtt->vm.i915;
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        phys_addr_t phys_addr;
        u32 pte_flags;
        int ret;

        GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
        phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);

        /*
         * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
         * will be dropped. For WC mappings in general we have 64 byte burst
         * writes when the WC buffer is flushed, so we can't use it, but have to
         * resort to an uncached mapping. The WC issue is easily caught by the
         * readback check when writing GTT PTE entries.
         */
        if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
                ggtt->gsm = ioremap(phys_addr, size);
        else
                ggtt->gsm = ioremap_wc(phys_addr, size);
        if (!ggtt->gsm) {
                drm_err(&i915->drm, "Failed to map the ggtt page table\n");
                return -ENOMEM;
        }

        kref_init(&ggtt->vm.resv_ref);
        ret = setup_scratch_page(&ggtt->vm);
        if (ret) {
                drm_err(&i915->drm, "Scratch setup failed\n");
                /* iounmap will also get called at remove, but meh */
                iounmap(ggtt->gsm);
                return ret;
        }

        pte_flags = 0;
        if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
                pte_flags |= PTE_LM;

        ggtt->vm.scratch[0]->encode =
                ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
                                    I915_CACHE_NONE, pte_flags);

        return 0;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

        iounmap(ggtt->gsm);
        free_scratch(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
        return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
                                               pci_resource_len(pdev, bar));
}

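/*
 * Probe the gen8+ GGTT: read its size from the GMCH control word, pick the
 * PTE insert/clear/invalidate implementations for the platform and set up
 * the private PAT.
 */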
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
        struct drm_i915_private *i915 = ggtt->vm.i915;
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        unsigned int size;
        u16 snb_gmch_ctl;

        if (!HAS_LMEM(i915) && !HAS_BAR2_SMEM_STOLEN(i915)) {
                ggtt->gmadr = pci_resource(pdev, 2);
                ggtt->mappable_end = resource_size(&ggtt->gmadr);
        }

        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        if (IS_CHERRYVIEW(i915))
                size = chv_get_total_gtt_size(snb_gmch_ctl);
        else
                size = gen8_get_total_gtt_size(snb_gmch_ctl);

        ggtt->vm.alloc_pt_dma = alloc_pt_dma;
        ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
        ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;

        ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
        ggtt->vm.cleanup = gen6_gmch_remove;
        ggtt->vm.insert_page = gen8_ggtt_insert_page;
        ggtt->vm.clear_range = nop_clear_range;
        if (intel_scanout_needs_vtd_wa(i915))
                ggtt->vm.clear_range = gen8_ggtt_clear_range;

        ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

        /*
         * Serialize GTT updates with aperture access on BXT if VT-d is on,
         * and always on CHV.
         */
        if (intel_vm_no_concurrent_access_wa(i915)) {
                ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
                ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;

                /*
                 * Calling stop_machine() version of GGTT update function
                 * at error capture/reset path will raise lockdep warning.
                 * Allow calling gen8_ggtt_insert_* directly at reset path
                 * which is safe from parallel GGTT updates.
                 */
                ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
                ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;

                ggtt->vm.bind_async_flags =
                        I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
        }

        ggtt->invalidate = gen8_ggtt_invalidate;

        ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
        ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;

        ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

        setup_private_pat(ggtt->vm.gt->uncore);

        return ggtt_probe_common(ggtt, size);
}

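/*
 * Gen6/7 GGTT PTE encodings: each platform variant maps the requested cache
 * level (and the read-only flag where supported) onto its own PTE bits.
 */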
static u64 snb_pte_encode(dma_addr_t addr,
                          enum i915_cache_level level,
                          u32 flags)
{
        gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

        switch (level) {
        case I915_CACHE_L3_LLC:
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                MISSING_CASE(level);
        }

        return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
                          enum i915_cache_level level,
                          u32 flags)
{
        gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

        switch (level) {
        case I915_CACHE_L3_LLC:
                pte |= GEN7_PTE_CACHE_L3_LLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                MISSING_CASE(level);
        }

        return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
                          enum i915_cache_level level,
                          u32 flags)
{
        gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

        if (!(flags & PTE_READ_ONLY))
                pte |= BYT_PTE_WRITEABLE;

        if (level != I915_CACHE_NONE)
                pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

        return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
                          enum i915_cache_level level,
                          u32 flags)
{
        gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

        if (level != I915_CACHE_NONE)
                pte |= HSW_WB_LLC_AGE3;

        return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
                           enum i915_cache_level level,
                           u32 flags)
{
        gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

        switch (level) {
        case I915_CACHE_NONE:
                break;
        case I915_CACHE_WT:
                pte |= HSW_WT_ELLC_LLC_AGE3;
                break;
        default:
                pte |= HSW_WB_ELLC_LLC_AGE3;
                break;
        }

        return pte;
}

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
        struct drm_i915_private *i915 = ggtt->vm.i915;
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        unsigned int size;
        u16 snb_gmch_ctl;

        ggtt->gmadr = pci_resource(pdev, 2);
        ggtt->mappable_end = resource_size(&ggtt->gmadr);

        /*
         * 64/512MB is the current min/max we actually know of, but this is
         * just a coarse sanity check.
         */
        if (ggtt->mappable_end < (64 << 20) ||
            ggtt->mappable_end > (512 << 20)) {
                drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
                        &ggtt->mappable_end);
                return -ENXIO;
        }

        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

        size = gen6_get_total_gtt_size(snb_gmch_ctl);
        ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

        ggtt->vm.alloc_pt_dma = alloc_pt_dma;
        ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

        ggtt->vm.clear_range = nop_clear_range;
        if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
                ggtt->vm.clear_range = gen6_ggtt_clear_range;
        ggtt->vm.insert_page = gen6_ggtt_insert_page;
        ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
        ggtt->vm.cleanup = gen6_gmch_remove;

        ggtt->invalidate = gen6_ggtt_invalidate;

        if (HAS_EDRAM(i915))
                ggtt->vm.pte_encode = iris_pte_encode;
        else if (IS_HASWELL(i915))
                ggtt->vm.pte_encode = hsw_pte_encode;
        else if (IS_VALLEYVIEW(i915))
                ggtt->vm.pte_encode = byt_pte_encode;
        else if (GRAPHICS_VER(i915) >= 7)
                ggtt->vm.pte_encode = ivb_pte_encode;
        else
                ggtt->vm.pte_encode = snb_pte_encode;

        ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
        ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;

        return ggtt_probe_common(ggtt, size);
}

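/*
 * Probe the GGTT hardware via the generation-specific path and sanity check
 * the reported sizes before use.
 */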
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        int ret;

        ggtt->vm.gt = gt;
        ggtt->vm.i915 = i915;
        ggtt->vm.dma = i915->drm.dev;
        dma_resv_init(&ggtt->vm._resv);

        if (GRAPHICS_VER(i915) >= 8)
                ret = gen8_gmch_probe(ggtt);
        else if (GRAPHICS_VER(i915) >= 6)
                ret = gen6_gmch_probe(ggtt);
        else
                ret = intel_ggtt_gmch_probe(ggtt);

        if (ret) {
                dma_resv_fini(&ggtt->vm._resv);
                return ret;
        }

        if ((ggtt->vm.total - 1) >> 32) {
                drm_err(&i915->drm,
                        "We never expected a Global GTT with more than 32bits"
                        " of address space! Found %lldM!\n",
                        ggtt->vm.total >> 20);
                ggtt->vm.total = 1ULL << 32;
                ggtt->mappable_end =
                        min_t(u64, ggtt->mappable_end, ggtt->vm.total);
        }

        if (ggtt->mappable_end > ggtt->vm.total) {
                drm_err(&i915->drm,
                        "mappable aperture extends past end of GGTT,"
                        " aperture=%pa, total=%llx\n",
                        &ggtt->mappable_end, ggtt->vm.total);
                ggtt->mappable_end = ggtt->vm.total;
        }

        /* GMADR is the PCI mmio aperture into the global GTT. */
        drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
        drm_dbg(&i915->drm, "GMADR size = %lluM\n",
                (u64)ggtt->mappable_end >> 20);
        drm_dbg(&i915->drm, "DSM size = %lluM\n",
                (u64)resource_size(&intel_graphics_stolen_res) >> 20);

        return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
        int ret;

        ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
        if (ret)
                return ret;

        if (i915_vtd_active(i915))
                drm_info(&i915->drm, "VT-d active for gfx access\n");

        return 0;
}

int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
        if (GRAPHICS_VER(i915) < 6)
                return intel_ggtt_gmch_enable_hw(i915);

        return 0;
}

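/*
 * When GuC is in use, GGTT invalidation must also invalidate the GuC TLB,
 * so swap in the GuC-aware invalidate callback (and back again on disable).
 */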
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
        GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

        ggtt->invalidate = guc_ggtt_invalidate;

        ggtt->invalidate(ggtt);
}

void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
        /* XXX Temporary pardon for error unload */
        if (ggtt->invalidate == gen8_ggtt_invalidate)
                return;

        /* We should only be called after i915_ggtt_enable_guc() */
        GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

        ggtt->invalidate = gen8_ggtt_invalidate;

        ggtt->invalidate(ggtt);
}

/**
 * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
 * @vm: The VM to restore the mappings for
 *
 * Restore the memory mappings for all objects mapped to HW via the GGTT or a
 * DPT page table.
 *
 * Returns %true if restoring the mapping for any object that was in a write
 * domain before suspend.
 */
bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
        struct i915_vma *vma;
        bool write_domain_objs = false;
        bool retained_ptes;

        drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

        /*
         * First fill our portion of the GTT with scratch pages if
         * they were not retained across suspend.
         */
        retained_ptes = suspend_retains_ptes(vm) &&
                !i915_vm_to_ggtt(vm)->pte_lost &&
                !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm));

        if (!retained_ptes)
                vm->clear_range(vm, 0, vm->total);

        /* clflush objects bound into the GGTT and rebind them. */
        list_for_each_entry(vma, &vm->bound_list, vm_link) {
                struct drm_i915_gem_object *obj = vma->obj;
                unsigned int was_bound =
                        atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

                GEM_BUG_ON(!was_bound);
                if (!retained_ptes)
                        vma->ops->bind_vma(vm, NULL, vma->resource,
                                           obj ? obj->cache_level : 0,
                                           was_bound);
                if (obj) { /* only used during resume => exclusive access */
                        write_domain_objs |= fetch_and_zero(&obj->write_domain);
                        obj->read_domains |= I915_GEM_DOMAIN_GTT;
                }
        }

        return write_domain_objs;
}

void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
        bool flush;

        intel_gt_check_and_clear_faults(ggtt->vm.gt);

        flush = i915_ggtt_resume_vm(&ggtt->vm);

        ggtt->invalidate(ggtt);

        if (flush)
                wbinvd_on_all_cpus();

        if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
                setup_private_pat(ggtt->vm.gt->uncore);

        intel_ggtt_restore_fences(ggtt);
}

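/*
 * Record whether the GGTT PTE contents may have been lost while suspended so
 * that i915_ggtt_resume_vm() knows it must rewrite them.
 */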
void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val)
{
        to_gt(i915)->ggtt->pte_lost = val;
}