f92a39fc511cfb8f7dee634dfda028f66b115be9
[linux-2.6-block.git] / drivers/gpu/drm/i915/i915_gem_gtt.c
1 /*
2  * Copyright © 2010 Daniel Vetter
3  * Copyright © 2011-2014 Intel Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25
26 #include <linux/slab.h> /* fault-inject.h is not standalone! */
27
28 #include <linux/fault-inject.h>
29 #include <linux/log2.h>
30 #include <linux/random.h>
31 #include <linux/seq_file.h>
32 #include <linux/stop_machine.h>
33
34 #include <asm/set_memory.h>
35
36 #include <drm/drmP.h>
37 #include <drm/i915_drm.h>
38
39 #include "i915_drv.h"
40 #include "i915_vgpu.h"
41 #include "i915_trace.h"
42 #include "intel_drv.h"
43 #include "intel_frontbuffer.h"
44
45 #define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
46
47 /**
48  * DOC: Global GTT views
49  *
50  * Background and previous state
51  *
52  * Historically objects could exist (be bound) in global GTT space only as
53  * singular instances with a view representing all of the object's backing pages
54  * in a linear fashion. This view will be called a normal view.
55  *
56  * To support multiple views of the same object, where the number of mapped
57  * pages is not equal to the backing store, or where the layout of the pages
58  * is not linear, the concept of a GGTT view was added.
59  *
60  * One example of an alternative view is a stereo display driven by a single
61  * image. In this case we would have a framebuffer looking like this
62  * (2x2 pages):
63  *
64  *    12
65  *    34
66  *
67  * The above represents a normal GGTT view as normally mapped for GPU or CPU
68  * rendering. In contrast, what would be fed to the display engine is an
69  * alternative view which could look something like this:
70  *
71  *   1212
72  *   3434
73  *
74  * In this example both the size and the layout of pages in the alternative
75  * view differ from those of the normal view.
76  *
77  * Implementation and usage
78  *
79  * GGTT views are implemented using VMAs and are distinguished via enum
80  * i915_ggtt_view_type and struct i915_ggtt_view.
81  *
82  * A new flavour of core GEM functions which work with GGTT bound objects was
83  * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
84  * renaming in large amounts of code. They take the struct i915_ggtt_view
85  * parameter encapsulating all metadata required to implement a view.
86  *
87  * As a helper for callers which are only interested in the normal view, a
88  * globally const i915_ggtt_view_normal singleton instance exists. All old core
89  * GEM API functions, the ones not taking the view parameter, operate on, or
90  * with, the normal GGTT view.
91  *
92  * Code wanting to add or use a new GGTT view needs to:
93  *
94  * 1. Add a new enum with a suitable name.
95  * 2. Extend the metadata in the i915_ggtt_view structure if required.
96  * 3. Add support to i915_get_vma_pages().
97  *
98  * New views are required to build a scatter-gather table from within the
99  * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
100  * exists for the lifetime of a VMA.
101  *
102  * The core API is designed to have copy semantics, which means that the
103  * passed-in struct i915_ggtt_view does not need to be persistent (left
104  * around after calling the core API functions).
105  *
106  */
107
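/*
 * Illustrative sketch only, not driver code: a toy model of how an
 * alternative GGTT view can remap the same backing pages.  Using the 2x2
 * page framebuffer from the DOC comment above, the "stereo" view repeats
 * each row of pages, producing the 1212 / 3434 layout.  All names here are
 * hypothetical; the real driver describes a view with a scatter-gather
 * table built for the VMA (cf. i915_get_ggtt_vma_pages() below), not with
 * an index array.
 */
#include <stdio.h>

#define FB_W 2
#define FB_H 2

static void build_stereo_view(const int *normal, int *view)
{
        int row, col, out = 0;

        for (row = 0; row < FB_H; row++) {
                /* each source row of pages appears twice, side by side */
                for (col = 0; col < 2 * FB_W; col++)
                        view[out++] = normal[row * FB_W + col % FB_W];
        }
}

int main(void)
{
        const int normal[FB_W * FB_H] = { 1, 2, 3, 4 };  /* the "12 / 34" layout */
        int view[2 * FB_W * FB_H];
        int i;

        build_stereo_view(normal, view);
        for (i = 0; i < 2 * FB_W * FB_H; i++)
                printf("%d%c", view[i], (i + 1) % (2 * FB_W) ? ' ' : '\n');
        return 0;
}
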
108 static int
109 i915_get_ggtt_vma_pages(struct i915_vma *vma);
110
111 static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
112 {
113         /* Note that as an uncached mmio write, this should flush the
114          * WCB of the writes into the GGTT before it triggers the invalidate.
115          */
116         I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
117 }
118
119 static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
120 {
121         gen6_ggtt_invalidate(dev_priv);
122         I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
123 }
124
125 static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
126 {
127         intel_gtt_chipset_flush();
128 }
129
130 static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
131 {
132         i915->ggtt.invalidate(i915);
133 }
134
135 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
136                                 int enable_ppgtt)
137 {
138         bool has_full_ppgtt;
139         bool has_full_48bit_ppgtt;
140
141         if (!dev_priv->info.has_aliasing_ppgtt)
142                 return 0;
143
144         has_full_ppgtt = dev_priv->info.has_full_ppgtt;
145         has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
146
147         if (intel_vgpu_active(dev_priv)) {
148                 /* GVT-g has no support for 32bit ppgtt */
149                 has_full_ppgtt = false;
150                 has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
151         }
152
153         /*
154          * We don't allow disabling PPGTT for gen9+ as it's a requirement for
155          * execlists, the sole mechanism available to submit work.
156          */
157         if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
158                 return 0;
159
160         if (enable_ppgtt == 1)
161                 return 1;
162
163         if (enable_ppgtt == 2 && has_full_ppgtt)
164                 return 2;
165
166         if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
167                 return 3;
168
169         /* Disable ppgtt on SNB if VT-d is on. */
170         if (IS_GEN6(dev_priv) && intel_vtd_active()) {
171                 DRM_INFO("Disabling PPGTT because VT-d is on\n");
172                 return 0;
173         }
174
175         /* Early VLV doesn't have this */
176         if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
177                 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
178                 return 0;
179         }
180
181         if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
182                 if (has_full_48bit_ppgtt)
183                         return 3;
184
185                 if (has_full_ppgtt)
186                         return 2;
187         }
188
189         return 1;
190 }
191
192 static int ppgtt_bind_vma(struct i915_vma *vma,
193                           enum i915_cache_level cache_level,
194                           u32 unused)
195 {
196         u32 pte_flags;
197         int ret;
198
199         if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
200                 ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
201                                                  vma->size);
202                 if (ret)
203                         return ret;
204         }
205
206         /* Currently applicable only to VLV */
207         pte_flags = 0;
208         if (vma->obj->gt_ro)
209                 pte_flags |= PTE_READ_ONLY;
210
211         vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
212
213         return 0;
214 }
215
216 static void ppgtt_unbind_vma(struct i915_vma *vma)
217 {
218         vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
219 }
220
221 static int ppgtt_set_pages(struct i915_vma *vma)
222 {
223         GEM_BUG_ON(vma->pages);
224
225         vma->pages = vma->obj->mm.pages;
226
227         vma->page_sizes = vma->obj->mm.page_sizes;
228
229         return 0;
230 }
231
232 static void clear_pages(struct i915_vma *vma)
233 {
234         GEM_BUG_ON(!vma->pages);
235
236         if (vma->pages != vma->obj->mm.pages) {
237                 sg_free_table(vma->pages);
238                 kfree(vma->pages);
239         }
240         vma->pages = NULL;
241
242         memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
243 }
244
245 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
246                                   enum i915_cache_level level)
247 {
248         gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
249         pte |= addr;
250
251         switch (level) {
252         case I915_CACHE_NONE:
253                 pte |= PPAT_UNCACHED;
254                 break;
255         case I915_CACHE_WT:
256                 pte |= PPAT_DISPLAY_ELLC;
257                 break;
258         default:
259                 pte |= PPAT_CACHED;
260                 break;
261         }
262
263         return pte;
264 }
265
266 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
267                                   const enum i915_cache_level level)
268 {
269         gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
270         pde |= addr;
271         if (level != I915_CACHE_NONE)
272                 pde |= PPAT_CACHED_PDE;
273         else
274                 pde |= PPAT_UNCACHED;
275         return pde;
276 }
277
278 #define gen8_pdpe_encode gen8_pde_encode
279 #define gen8_pml4e_encode gen8_pde_encode
280
281 static gen6_pte_t snb_pte_encode(dma_addr_t addr,
282                                  enum i915_cache_level level,
283                                  u32 unused)
284 {
285         gen6_pte_t pte = GEN6_PTE_VALID;
286         pte |= GEN6_PTE_ADDR_ENCODE(addr);
287
288         switch (level) {
289         case I915_CACHE_L3_LLC:
290         case I915_CACHE_LLC:
291                 pte |= GEN6_PTE_CACHE_LLC;
292                 break;
293         case I915_CACHE_NONE:
294                 pte |= GEN6_PTE_UNCACHED;
295                 break;
296         default:
297                 MISSING_CASE(level);
298         }
299
300         return pte;
301 }
302
303 static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
304                                  enum i915_cache_level level,
305                                  u32 unused)
306 {
307         gen6_pte_t pte = GEN6_PTE_VALID;
308         pte |= GEN6_PTE_ADDR_ENCODE(addr);
309
310         switch (level) {
311         case I915_CACHE_L3_LLC:
312                 pte |= GEN7_PTE_CACHE_L3_LLC;
313                 break;
314         case I915_CACHE_LLC:
315                 pte |= GEN6_PTE_CACHE_LLC;
316                 break;
317         case I915_CACHE_NONE:
318                 pte |= GEN6_PTE_UNCACHED;
319                 break;
320         default:
321                 MISSING_CASE(level);
322         }
323
324         return pte;
325 }
326
327 static gen6_pte_t byt_pte_encode(dma_addr_t addr,
328                                  enum i915_cache_level level,
329                                  u32 flags)
330 {
331         gen6_pte_t pte = GEN6_PTE_VALID;
332         pte |= GEN6_PTE_ADDR_ENCODE(addr);
333
334         if (!(flags & PTE_READ_ONLY))
335                 pte |= BYT_PTE_WRITEABLE;
336
337         if (level != I915_CACHE_NONE)
338                 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
339
340         return pte;
341 }
342
343 static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
344                                  enum i915_cache_level level,
345                                  u32 unused)
346 {
347         gen6_pte_t pte = GEN6_PTE_VALID;
348         pte |= HSW_PTE_ADDR_ENCODE(addr);
349
350         if (level != I915_CACHE_NONE)
351                 pte |= HSW_WB_LLC_AGE3;
352
353         return pte;
354 }
355
356 static gen6_pte_t iris_pte_encode(dma_addr_t addr,
357                                   enum i915_cache_level level,
358                                   u32 unused)
359 {
360         gen6_pte_t pte = GEN6_PTE_VALID;
361         pte |= HSW_PTE_ADDR_ENCODE(addr);
362
363         switch (level) {
364         case I915_CACHE_NONE:
365                 break;
366         case I915_CACHE_WT:
367                 pte |= HSW_WT_ELLC_LLC_AGE3;
368                 break;
369         default:
370                 pte |= HSW_WB_ELLC_LLC_AGE3;
371                 break;
372         }
373
374         return pte;
375 }
376
377 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
378 {
379         struct pagevec *pvec = &vm->free_pages;
380
381         if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
382                 i915_gem_shrink_all(vm->i915);
383
384         if (likely(pvec->nr))
385                 return pvec->pages[--pvec->nr];
386
387         if (!vm->pt_kmap_wc)
388                 return alloc_page(gfp);
389
390         /* A placeholder for a specific mutex to guard the WC stash */
391         lockdep_assert_held(&vm->i915->drm.struct_mutex);
392
393         /* Look in our global stash of WC pages... */
394         pvec = &vm->i915->mm.wc_stash;
395         if (likely(pvec->nr))
396                 return pvec->pages[--pvec->nr];
397
398         /* Otherwise batch allocate pages to amortize cost of set_pages_wc. */
399         do {
400                 struct page *page;
401
402                 page = alloc_page(gfp);
403                 if (unlikely(!page))
404                         break;
405
406                 pvec->pages[pvec->nr++] = page;
407         } while (pagevec_space(pvec));
408
409         if (unlikely(!pvec->nr))
410                 return NULL;
411
412         set_pages_array_wc(pvec->pages, pvec->nr);
413
414         return pvec->pages[--pvec->nr];
415 }
416
417 static void vm_free_pages_release(struct i915_address_space *vm,
418                                   bool immediate)
419 {
420         struct pagevec *pvec = &vm->free_pages;
421
422         GEM_BUG_ON(!pagevec_count(pvec));
423
424         if (vm->pt_kmap_wc) {
425                 struct pagevec *stash = &vm->i915->mm.wc_stash;
426
427                 /* When we use WC, first fill up the global stash and then,
428                  * only if it is full, immediately free the overflow.
429                  */
430
431                 lockdep_assert_held(&vm->i915->drm.struct_mutex);
432                 if (pagevec_space(stash)) {
433                         do {
434                                 stash->pages[stash->nr++] =
435                                         pvec->pages[--pvec->nr];
436                                 if (!pvec->nr)
437                                         return;
438                         } while (pagevec_space(stash));
439
440                         /* As we have made some room in the VM's free_pages,
441                          * we can wait for it to fill again. Unless we are
442                          * inside i915_address_space_fini() and must
443                          * immediately release the pages!
444                          */
445                         if (!immediate)
446                                 return;
447                 }
448
449                 set_pages_array_wb(pvec->pages, pvec->nr);
450         }
451
452         __pagevec_release(pvec);
453 }
454
455 static void vm_free_page(struct i915_address_space *vm, struct page *page)
456 {
457         /*
458          * On !llc, we need to change the pages back to WB. We only do so
459          * in bulk, so we rarely need to change the page attributes here,
460          * but doing so requires a stop_machine() from deep inside arch/x86/mm.
461          * To make detection of the possible sleep more likely, use an
462          * unconditional might_sleep() for everybody.
463          */
464         might_sleep();
465         if (!pagevec_add(&vm->free_pages, page))
466                 vm_free_pages_release(vm, false);
467 }
468
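/*
 * Illustrative sketch only, not driver code: the stash pattern used by
 * vm_alloc_page()/vm_free_page() above, reduced to a self-contained toy.
 * Allocations first try to pop a page from a small stash; frees push pages
 * back and only release to the system once the stash is full, so any
 * expensive per-batch work (set_pages_array_wc()/_wb() in the real code,
 * omitted here) is amortized.  malloc()/free() stand in for alloc_page()
 * and __free_page(); STASH_SIZE is an arbitrary choice for this sketch.
 */
#include <stddef.h>
#include <stdlib.h>

#define STASH_SIZE 15

struct page_stash {
        void *pages[STASH_SIZE];
        unsigned int nr;
};

static void *stash_alloc(struct page_stash *stash, size_t sz)
{
        if (stash->nr)
                return stash->pages[--stash->nr];       /* reuse a stashed page */
        return malloc(sz);                              /* slow path: fresh allocation */
}

static void stash_free(struct page_stash *stash, void *page)
{
        if (stash->nr < STASH_SIZE) {
                stash->pages[stash->nr++] = page;       /* keep it around for reuse */
                return;
        }
        free(page);                                     /* stash full: really release */
}

int main(void)
{
        struct page_stash stash = { .nr = 0 };
        void *a = stash_alloc(&stash, 4096);
        void *b = stash_alloc(&stash, 4096);

        stash_free(&stash, a);                  /* both go back into the stash */
        stash_free(&stash, b);
        free(stash_alloc(&stash, 4096));        /* reused (b) without hitting malloc */
        free(stash_alloc(&stash, 4096));        /* reused (a) */
        return 0;
}
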
469 static int __setup_page_dma(struct i915_address_space *vm,
470                             struct i915_page_dma *p,
471                             gfp_t gfp)
472 {
473         p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
474         if (unlikely(!p->page))
475                 return -ENOMEM;
476
477         p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
478                                 PCI_DMA_BIDIRECTIONAL);
479         if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
480                 vm_free_page(vm, p->page);
481                 return -ENOMEM;
482         }
483
484         return 0;
485 }
486
487 static int setup_page_dma(struct i915_address_space *vm,
488                           struct i915_page_dma *p)
489 {
490         return __setup_page_dma(vm, p, I915_GFP_DMA);
491 }
492
493 static void cleanup_page_dma(struct i915_address_space *vm,
494                              struct i915_page_dma *p)
495 {
496         dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
497         vm_free_page(vm, p->page);
498 }
499
500 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
501
502 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
503 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
504 #define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
505 #define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
506
507 static void fill_page_dma(struct i915_address_space *vm,
508                           struct i915_page_dma *p,
509                           const u64 val)
510 {
511         u64 * const vaddr = kmap_atomic(p->page);
512
513         memset64(vaddr, val, PAGE_SIZE / sizeof(val));
514
515         kunmap_atomic(vaddr);
516 }
517
518 static void fill_page_dma_32(struct i915_address_space *vm,
519                              struct i915_page_dma *p,
520                              const u32 v)
521 {
522         fill_page_dma(vm, p, (u64)v << 32 | v);
523 }
524
525 static int
526 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
527 {
528         struct page *page = NULL;
529         dma_addr_t addr;
530         int order;
531
532         /*
533          * In order to utilize 64K pages for an object with a size < 2M, we will
534          * need to support a 64K scratch page, given that every 16th entry for a
535          * page-table operating in 64K mode must point to a properly aligned 64K
536          * region, including any PTEs which happen to point to scratch.
537          *
538          * This is only relevant for the 48b PPGTT where we support
539          * huge-gtt-pages, see also i915_vma_insert().
540          *
541          * TODO: we should really consider write-protecting the scratch-page and
542          * sharing it between ppgtts
543          */
544         if (i915_vm_is_48bit(vm) &&
545             HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
546                 order = get_order(I915_GTT_PAGE_SIZE_64K);
547                 page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
548                 if (page) {
549                         addr = dma_map_page(vm->dma, page, 0,
550                                             I915_GTT_PAGE_SIZE_64K,
551                                             PCI_DMA_BIDIRECTIONAL);
552                         if (unlikely(dma_mapping_error(vm->dma, addr))) {
553                                 __free_pages(page, order);
554                                 page = NULL;
555                         }
556
557                         if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
558                                 dma_unmap_page(vm->dma, addr,
559                                                I915_GTT_PAGE_SIZE_64K,
560                                                PCI_DMA_BIDIRECTIONAL);
561                                 __free_pages(page, order);
562                                 page = NULL;
563                         }
564                 }
565         }
566
567         if (!page) {
568                 order = 0;
569                 page = alloc_page(gfp | __GFP_ZERO);
570                 if (unlikely(!page))
571                         return -ENOMEM;
572
573                 addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
574                                     PCI_DMA_BIDIRECTIONAL);
575                 if (unlikely(dma_mapping_error(vm->dma, addr))) {
576                         __free_page(page);
577                         return -ENOMEM;
578                 }
579         }
580
581         vm->scratch_page.page = page;
582         vm->scratch_page.daddr = addr;
583         vm->scratch_page.order = order;
584
585         return 0;
586 }
587
588 static void cleanup_scratch_page(struct i915_address_space *vm)
589 {
590         struct i915_page_dma *p = &vm->scratch_page;
591
592         dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
593                        PCI_DMA_BIDIRECTIONAL);
594         __free_pages(p->page, p->order);
595 }
596
597 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
598 {
599         struct i915_page_table *pt;
600
601         pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
602         if (unlikely(!pt))
603                 return ERR_PTR(-ENOMEM);
604
605         if (unlikely(setup_px(vm, pt))) {
606                 kfree(pt);
607                 return ERR_PTR(-ENOMEM);
608         }
609
610         pt->used_ptes = 0;
611         return pt;
612 }
613
614 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
615 {
616         cleanup_px(vm, pt);
617         kfree(pt);
618 }
619
620 static void gen8_initialize_pt(struct i915_address_space *vm,
621                                struct i915_page_table *pt)
622 {
623         fill_px(vm, pt,
624                 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
625 }
626
627 static void gen6_initialize_pt(struct i915_address_space *vm,
628                                struct i915_page_table *pt)
629 {
630         fill32_px(vm, pt,
631                   vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
632 }
633
634 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
635 {
636         struct i915_page_directory *pd;
637
638         pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
639         if (unlikely(!pd))
640                 return ERR_PTR(-ENOMEM);
641
642         if (unlikely(setup_px(vm, pd))) {
643                 kfree(pd);
644                 return ERR_PTR(-ENOMEM);
645         }
646
647         pd->used_pdes = 0;
648         return pd;
649 }
650
651 static void free_pd(struct i915_address_space *vm,
652                     struct i915_page_directory *pd)
653 {
654         cleanup_px(vm, pd);
655         kfree(pd);
656 }
657
658 static void gen8_initialize_pd(struct i915_address_space *vm,
659                                struct i915_page_directory *pd)
660 {
661         unsigned int i;
662
663         fill_px(vm, pd,
664                 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
665         for (i = 0; i < I915_PDES; i++)
666                 pd->page_table[i] = vm->scratch_pt;
667 }
668
669 static int __pdp_init(struct i915_address_space *vm,
670                       struct i915_page_directory_pointer *pdp)
671 {
672         const unsigned int pdpes = i915_pdpes_per_pdp(vm);
673         unsigned int i;
674
675         pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
676                                             GFP_KERNEL | __GFP_NOWARN);
677         if (unlikely(!pdp->page_directory))
678                 return -ENOMEM;
679
680         for (i = 0; i < pdpes; i++)
681                 pdp->page_directory[i] = vm->scratch_pd;
682
683         return 0;
684 }
685
686 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
687 {
688         kfree(pdp->page_directory);
689         pdp->page_directory = NULL;
690 }
691
692 static inline bool use_4lvl(const struct i915_address_space *vm)
693 {
694         return i915_vm_is_48bit(vm);
695 }
696
697 static struct i915_page_directory_pointer *
698 alloc_pdp(struct i915_address_space *vm)
699 {
700         struct i915_page_directory_pointer *pdp;
701         int ret = -ENOMEM;
702
703         WARN_ON(!use_4lvl(vm));
704
705         pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
706         if (!pdp)
707                 return ERR_PTR(-ENOMEM);
708
709         ret = __pdp_init(vm, pdp);
710         if (ret)
711                 goto fail_bitmap;
712
713         ret = setup_px(vm, pdp);
714         if (ret)
715                 goto fail_page_m;
716
717         return pdp;
718
719 fail_page_m:
720         __pdp_fini(pdp);
721 fail_bitmap:
722         kfree(pdp);
723
724         return ERR_PTR(ret);
725 }
726
727 static void free_pdp(struct i915_address_space *vm,
728                      struct i915_page_directory_pointer *pdp)
729 {
730         __pdp_fini(pdp);
731
732         if (!use_4lvl(vm))
733                 return;
734
735         cleanup_px(vm, pdp);
736         kfree(pdp);
737 }
738
739 static void gen8_initialize_pdp(struct i915_address_space *vm,
740                                 struct i915_page_directory_pointer *pdp)
741 {
742         gen8_ppgtt_pdpe_t scratch_pdpe;
743
744         scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
745
746         fill_px(vm, pdp, scratch_pdpe);
747 }
748
749 static void gen8_initialize_pml4(struct i915_address_space *vm,
750                                  struct i915_pml4 *pml4)
751 {
752         unsigned int i;
753
754         fill_px(vm, pml4,
755                 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
756         for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
757                 pml4->pdps[i] = vm->scratch_pdp;
758 }
759
760 /* Broadwell Page Directory Pointer Descriptors */
761 static int gen8_write_pdp(struct drm_i915_gem_request *req,
762                           unsigned entry,
763                           dma_addr_t addr)
764 {
765         struct intel_engine_cs *engine = req->engine;
766         u32 *cs;
767
768         BUG_ON(entry >= 4);
769
770         cs = intel_ring_begin(req, 6);
771         if (IS_ERR(cs))
772                 return PTR_ERR(cs);
773
774         *cs++ = MI_LOAD_REGISTER_IMM(1);
775         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
776         *cs++ = upper_32_bits(addr);
777         *cs++ = MI_LOAD_REGISTER_IMM(1);
778         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
779         *cs++ = lower_32_bits(addr);
780         intel_ring_advance(req, cs);
781
782         return 0;
783 }
784
785 static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
786                                struct drm_i915_gem_request *req)
787 {
788         int i, ret;
789
790         for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
791                 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
792
793                 ret = gen8_write_pdp(req, i, pd_daddr);
794                 if (ret)
795                         return ret;
796         }
797
798         return 0;
799 }
800
801 static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
802                                struct drm_i915_gem_request *req)
803 {
804         return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
805 }
806
807 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
808  * the page table structures, we mark them dirty so that
809  * context switching/execlist queuing code takes extra steps
810  * to ensure that the TLBs are flushed.
811  */
812 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
813 {
814         ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
815 }
816
817 /* Removes entries from a single page table, releasing it if it's empty.
818  * Caller can use the return value to update higher-level entries.
819  */
820 static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
821                                 struct i915_page_table *pt,
822                                 u64 start, u64 length)
823 {
824         unsigned int num_entries = gen8_pte_count(start, length);
825         unsigned int pte = gen8_pte_index(start);
826         unsigned int pte_end = pte + num_entries;
827         const gen8_pte_t scratch_pte =
828                 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
829         gen8_pte_t *vaddr;
830
831         GEM_BUG_ON(num_entries > pt->used_ptes);
832
833         pt->used_ptes -= num_entries;
834         if (!pt->used_ptes)
835                 return true;
836
837         vaddr = kmap_atomic_px(pt);
838         while (pte < pte_end)
839                 vaddr[pte++] = scratch_pte;
840         kunmap_atomic(vaddr);
841
842         return false;
843 }
844
845 static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
846                                struct i915_page_directory *pd,
847                                struct i915_page_table *pt,
848                                unsigned int pde)
849 {
850         gen8_pde_t *vaddr;
851
852         pd->page_table[pde] = pt;
853
854         vaddr = kmap_atomic_px(pd);
855         vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
856         kunmap_atomic(vaddr);
857 }
858
859 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
860                                 struct i915_page_directory *pd,
861                                 u64 start, u64 length)
862 {
863         struct i915_page_table *pt;
864         u32 pde;
865
866         gen8_for_each_pde(pt, pd, start, length, pde) {
867                 GEM_BUG_ON(pt == vm->scratch_pt);
868
869                 if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
870                         continue;
871
872                 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
873                 GEM_BUG_ON(!pd->used_pdes);
874                 pd->used_pdes--;
875
876                 free_pt(vm, pt);
877         }
878
879         return !pd->used_pdes;
880 }
881
882 static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
883                                 struct i915_page_directory_pointer *pdp,
884                                 struct i915_page_directory *pd,
885                                 unsigned int pdpe)
886 {
887         gen8_ppgtt_pdpe_t *vaddr;
888
889         pdp->page_directory[pdpe] = pd;
890         if (!use_4lvl(vm))
891                 return;
892
893         vaddr = kmap_atomic_px(pdp);
894         vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
895         kunmap_atomic(vaddr);
896 }
897
898 /* Removes entries from a single page dir pointer, releasing it if it's empty.
899  * Caller can use the return value to update higher-level entries.
900  */
901 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
902                                  struct i915_page_directory_pointer *pdp,
903                                  u64 start, u64 length)
904 {
905         struct i915_page_directory *pd;
906         unsigned int pdpe;
907
908         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
909                 GEM_BUG_ON(pd == vm->scratch_pd);
910
911                 if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
912                         continue;
913
914                 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
915                 GEM_BUG_ON(!pdp->used_pdpes);
916                 pdp->used_pdpes--;
917
918                 free_pd(vm, pd);
919         }
920
921         return !pdp->used_pdpes;
922 }
923
924 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
925                                   u64 start, u64 length)
926 {
927         gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
928 }
929
930 static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
931                                  struct i915_page_directory_pointer *pdp,
932                                  unsigned int pml4e)
933 {
934         gen8_ppgtt_pml4e_t *vaddr;
935
936         pml4->pdps[pml4e] = pdp;
937
938         vaddr = kmap_atomic_px(pml4);
939         vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
940         kunmap_atomic(vaddr);
941 }
942
943 /* Removes entries from a single pml4.
944  * This is the top-level structure in 4-level page tables used on gen8+.
945  * Empty entries always point at the scratch pdp.
946  */
947 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
948                                   u64 start, u64 length)
949 {
950         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
951         struct i915_pml4 *pml4 = &ppgtt->pml4;
952         struct i915_page_directory_pointer *pdp;
953         unsigned int pml4e;
954
955         GEM_BUG_ON(!use_4lvl(vm));
956
957         gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
958                 GEM_BUG_ON(pdp == vm->scratch_pdp);
959
960                 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
961                         continue;
962
963                 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
964
965                 free_pdp(vm, pdp);
966         }
967 }
968
969 static inline struct sgt_dma {
970         struct scatterlist *sg;
971         dma_addr_t dma, max;
972 } sgt_dma(struct i915_vma *vma) {
973         struct scatterlist *sg = vma->pages->sgl;
974         dma_addr_t addr = sg_dma_address(sg);
975         return (struct sgt_dma) { sg, addr, addr + sg->length };
976 }
977
978 struct gen8_insert_pte {
979         u16 pml4e;
980         u16 pdpe;
981         u16 pde;
982         u16 pte;
983 };
984
985 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
986 {
987         return (struct gen8_insert_pte) {
988                  gen8_pml4e_index(start),
989                  gen8_pdpe_index(start),
990                  gen8_pde_index(start),
991                  gen8_pte_index(start),
992         };
993 }
994
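/*
 * Illustrative sketch only, not driver code: how a 48-bit GPU virtual
 * address decomposes into the four indices produced by gen8_insert_pte()
 * above.  The 9-9-9-9-12 split mirrors the gen8 4-level layout
 * (GEN8_PML4E_SHIFT and friends in the real headers); the SKETCH_* names
 * and the example address are inventions of this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PTE_SHIFT        12
#define SKETCH_PDE_SHIFT        21
#define SKETCH_PDPE_SHIFT       30
#define SKETCH_PML4E_SHIFT      39
#define SKETCH_INDEX_MASK       0x1ff   /* 9 bits of index per level */

int main(void)
{
        uint64_t va = 0x0000123456789000ULL;    /* arbitrary example address */

        printf("pml4e=%llu pdpe=%llu pde=%llu pte=%llu\n",
               (unsigned long long)((va >> SKETCH_PML4E_SHIFT) & SKETCH_INDEX_MASK),
               (unsigned long long)((va >> SKETCH_PDPE_SHIFT) & SKETCH_INDEX_MASK),
               (unsigned long long)((va >> SKETCH_PDE_SHIFT) & SKETCH_INDEX_MASK),
               (unsigned long long)((va >> SKETCH_PTE_SHIFT) & SKETCH_INDEX_MASK));
        return 0;
}
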
995 static __always_inline bool
996 gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
997                               struct i915_page_directory_pointer *pdp,
998                               struct sgt_dma *iter,
999                               struct gen8_insert_pte *idx,
1000                               enum i915_cache_level cache_level)
1001 {
1002         struct i915_page_directory *pd;
1003         const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
1004         gen8_pte_t *vaddr;
1005         bool ret;
1006
1007         GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
1008         pd = pdp->page_directory[idx->pdpe];
1009         vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
1010         do {
1011                 vaddr[idx->pte] = pte_encode | iter->dma;
1012
1013                 iter->dma += PAGE_SIZE;
1014                 if (iter->dma >= iter->max) {
1015                         iter->sg = __sg_next(iter->sg);
1016                         if (!iter->sg) {
1017                                 ret = false;
1018                                 break;
1019                         }
1020
1021                         iter->dma = sg_dma_address(iter->sg);
1022                         iter->max = iter->dma + iter->sg->length;
1023                 }
1024
1025                 if (++idx->pte == GEN8_PTES) {
1026                         idx->pte = 0;
1027
1028                         if (++idx->pde == I915_PDES) {
1029                                 idx->pde = 0;
1030
1031                                 /* Limited by sg length for 3lvl */
1032                                 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
1033                                         idx->pdpe = 0;
1034                                         ret = true;
1035                                         break;
1036                                 }
1037
1038                                 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
1039                                 pd = pdp->page_directory[idx->pdpe];
1040                         }
1041
1042                         kunmap_atomic(vaddr);
1043                         vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
1044                 }
1045         } while (1);
1046         kunmap_atomic(vaddr);
1047
1048         return ret;
1049 }
1050
1051 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
1052                                    struct i915_vma *vma,
1053                                    enum i915_cache_level cache_level,
1054                                    u32 unused)
1055 {
1056         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1057         struct sgt_dma iter = sgt_dma(vma);
1058         struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1059
1060         gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
1061                                       cache_level);
1062
1063         vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1064 }
1065
1066 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1067                                            struct i915_page_directory_pointer **pdps,
1068                                            struct sgt_dma *iter,
1069                                            enum i915_cache_level cache_level)
1070 {
1071         const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
1072         u64 start = vma->node.start;
1073         dma_addr_t rem = iter->sg->length;
1074
1075         do {
1076                 struct gen8_insert_pte idx = gen8_insert_pte(start);
1077                 struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
1078                 struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
1079                 unsigned int page_size;
1080                 bool maybe_64K = false;
1081                 gen8_pte_t encode = pte_encode;
1082                 gen8_pte_t *vaddr;
1083                 u16 index, max;
1084
1085                 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1086                     IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1087                     rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
1088                         index = idx.pde;
1089                         max = I915_PDES;
1090                         page_size = I915_GTT_PAGE_SIZE_2M;
1091
1092                         encode |= GEN8_PDE_PS_2M;
1093
1094                         vaddr = kmap_atomic_px(pd);
1095                 } else {
1096                         struct i915_page_table *pt = pd->page_table[idx.pde];
1097
1098                         index = idx.pte;
1099                         max = GEN8_PTES;
1100                         page_size = I915_GTT_PAGE_SIZE;
1101
1102                         if (!index &&
1103                             vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1104                             IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1105                             (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1106                              rem >= (max - index) << PAGE_SHIFT))
1107                                 maybe_64K = true;
1108
1109                         vaddr = kmap_atomic_px(pt);
1110                 }
1111
1112                 do {
1113                         GEM_BUG_ON(iter->sg->length < page_size);
1114                         vaddr[index++] = encode | iter->dma;
1115
1116                         start += page_size;
1117                         iter->dma += page_size;
1118                         rem -= page_size;
1119                         if (iter->dma >= iter->max) {
1120                                 iter->sg = __sg_next(iter->sg);
1121                                 if (!iter->sg)
1122                                         break;
1123
1124                                 rem = iter->sg->length;
1125                                 iter->dma = sg_dma_address(iter->sg);
1126                                 iter->max = iter->dma + rem;
1127
1128                                 if (maybe_64K && index < max &&
1129                                     !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1130                                       (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1131                                        rem >= (max - index) << PAGE_SHIFT)))
1132                                         maybe_64K = false;
1133
1134                                 if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1135                                         break;
1136                         }
1137                 } while (rem >= page_size && index < max);
1138
1139                 kunmap_atomic(vaddr);
1140
1141                 /*
1142                  * Is it safe to mark the 2M block as 64K? -- Either we have
1143                  * filled the whole page-table with 64K entries, or filled part of
1144                  * it and have reached the end of the sg table and we have
1145                  * enough padding.
1146                  */
1147                 if (maybe_64K &&
1148                     (index == max ||
1149                      (i915_vm_has_scratch_64K(vma->vm) &&
1150                       !iter->sg && IS_ALIGNED(vma->node.start +
1151                                               vma->node.size,
1152                                               I915_GTT_PAGE_SIZE_2M)))) {
1153                         vaddr = kmap_atomic_px(pd);
1154                         vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
1155                         kunmap_atomic(vaddr);
1156                         page_size = I915_GTT_PAGE_SIZE_64K;
1157                 }
1158
1159                 vma->page_sizes.gtt |= page_size;
1160         } while (iter->sg);
1161 }
1162
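/*
 * Illustrative sketch only, not driver code: the page-size selection rule
 * behind gen8_ppgtt_insert_huge_entries() above, reduced to a predicate on
 * DMA alignment and remaining length.  The real code additionally checks
 * vma->page_sizes.sg and the current position within the page table
 * (idx.pte/index), which this toy ignores.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_SZ_4K    (1ull << 12)
#define SKETCH_SZ_64K   (1ull << 16)
#define SKETCH_SZ_2M    (1ull << 21)

static bool sketch_aligned(uint64_t x, uint64_t a)
{
        return (x & (a - 1)) == 0;
}

/* Pick the largest GTT page size usable for a run of @rem bytes at @dma. */
static uint64_t sketch_pick_page_size(uint64_t dma, uint64_t rem)
{
        if (sketch_aligned(dma, SKETCH_SZ_2M) && rem >= SKETCH_SZ_2M)
                return SKETCH_SZ_2M;
        if (sketch_aligned(dma, SKETCH_SZ_64K) && rem >= SKETCH_SZ_64K)
                return SKETCH_SZ_64K;
        return SKETCH_SZ_4K;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)sketch_pick_page_size(0x200000, 4u << 20));   /* 2M */
        printf("%llu\n", (unsigned long long)sketch_pick_page_size(0x210000, 256u << 10)); /* 64K */
        printf("%llu\n", (unsigned long long)sketch_pick_page_size(0x201000, 8u << 10));   /* 4K */
        return 0;
}
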
1163 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
1164                                    struct i915_vma *vma,
1165                                    enum i915_cache_level cache_level,
1166                                    u32 unused)
1167 {
1168         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1169         struct sgt_dma iter = sgt_dma(vma);
1170         struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
1171
1172         if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1173                 gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
1174         } else {
1175                 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1176
1177                 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
1178                                                      &iter, &idx, cache_level))
1179                         GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
1180
1181                 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1182         }
1183 }
1184
1185 static void gen8_free_page_tables(struct i915_address_space *vm,
1186                                   struct i915_page_directory *pd)
1187 {
1188         int i;
1189
1190         if (!px_page(pd))
1191                 return;
1192
1193         for (i = 0; i < I915_PDES; i++) {
1194                 if (pd->page_table[i] != vm->scratch_pt)
1195                         free_pt(vm, pd->page_table[i]);
1196         }
1197 }
1198
1199 static int gen8_init_scratch(struct i915_address_space *vm)
1200 {
1201         int ret;
1202
1203         ret = setup_scratch_page(vm, I915_GFP_DMA);
1204         if (ret)
1205                 return ret;
1206
1207         vm->scratch_pt = alloc_pt(vm);
1208         if (IS_ERR(vm->scratch_pt)) {
1209                 ret = PTR_ERR(vm->scratch_pt);
1210                 goto free_scratch_page;
1211         }
1212
1213         vm->scratch_pd = alloc_pd(vm);
1214         if (IS_ERR(vm->scratch_pd)) {
1215                 ret = PTR_ERR(vm->scratch_pd);
1216                 goto free_pt;
1217         }
1218
1219         if (use_4lvl(vm)) {
1220                 vm->scratch_pdp = alloc_pdp(vm);
1221                 if (IS_ERR(vm->scratch_pdp)) {
1222                         ret = PTR_ERR(vm->scratch_pdp);
1223                         goto free_pd;
1224                 }
1225         }
1226
1227         gen8_initialize_pt(vm, vm->scratch_pt);
1228         gen8_initialize_pd(vm, vm->scratch_pd);
1229         if (use_4lvl(vm))
1230                 gen8_initialize_pdp(vm, vm->scratch_pdp);
1231
1232         return 0;
1233
1234 free_pd:
1235         free_pd(vm, vm->scratch_pd);
1236 free_pt:
1237         free_pt(vm, vm->scratch_pt);
1238 free_scratch_page:
1239         cleanup_scratch_page(vm);
1240
1241         return ret;
1242 }
1243
1244 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1245 {
1246         struct i915_address_space *vm = &ppgtt->base;
1247         struct drm_i915_private *dev_priv = vm->i915;
1248         enum vgt_g2v_type msg;
1249         int i;
1250
1251         if (use_4lvl(vm)) {
1252                 const u64 daddr = px_dma(&ppgtt->pml4);
1253
1254                 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1255                 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1256
1257                 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1258                                 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1259         } else {
1260                 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1261                         const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
1262
1263                         I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1264                         I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1265                 }
1266
1267                 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1268                                 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1269         }
1270
1271         I915_WRITE(vgtif_reg(g2v_notify), msg);
1272
1273         return 0;
1274 }
1275
1276 static void gen8_free_scratch(struct i915_address_space *vm)
1277 {
1278         if (use_4lvl(vm))
1279                 free_pdp(vm, vm->scratch_pdp);
1280         free_pd(vm, vm->scratch_pd);
1281         free_pt(vm, vm->scratch_pt);
1282         cleanup_scratch_page(vm);
1283 }
1284
1285 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
1286                                     struct i915_page_directory_pointer *pdp)
1287 {
1288         const unsigned int pdpes = i915_pdpes_per_pdp(vm);
1289         int i;
1290
1291         for (i = 0; i < pdpes; i++) {
1292                 if (pdp->page_directory[i] == vm->scratch_pd)
1293                         continue;
1294
1295                 gen8_free_page_tables(vm, pdp->page_directory[i]);
1296                 free_pd(vm, pdp->page_directory[i]);
1297         }
1298
1299         free_pdp(vm, pdp);
1300 }
1301
1302 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1303 {
1304         int i;
1305
1306         for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1307                 if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
1308                         continue;
1309
1310                 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
1311         }
1312
1313         cleanup_px(&ppgtt->base, &ppgtt->pml4);
1314 }
1315
1316 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1317 {
1318         struct drm_i915_private *dev_priv = vm->i915;
1319         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1320
1321         if (intel_vgpu_active(dev_priv))
1322                 gen8_ppgtt_notify_vgt(ppgtt, false);
1323
1324         if (use_4lvl(vm))
1325                 gen8_ppgtt_cleanup_4lvl(ppgtt);
1326         else
1327                 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
1328
1329         gen8_free_scratch(vm);
1330 }
1331
1332 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1333                                struct i915_page_directory *pd,
1334                                u64 start, u64 length)
1335 {
1336         struct i915_page_table *pt;
1337         u64 from = start;
1338         unsigned int pde;
1339
1340         gen8_for_each_pde(pt, pd, start, length, pde) {
1341                 int count = gen8_pte_count(start, length);
1342
1343                 if (pt == vm->scratch_pt) {
1344                         pt = alloc_pt(vm);
1345                         if (IS_ERR(pt))
1346                                 goto unwind;
1347
1348                         if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
1349                                 gen8_initialize_pt(vm, pt);
1350
1351                         gen8_ppgtt_set_pde(vm, pd, pt, pde);
1352                         pd->used_pdes++;
1353                         GEM_BUG_ON(pd->used_pdes > I915_PDES);
1354                 }
1355
1356                 pt->used_ptes += count;
1357         }
1358         return 0;
1359
1360 unwind:
1361         gen8_ppgtt_clear_pd(vm, pd, from, start - from);
1362         return -ENOMEM;
1363 }
1364
1365 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1366                                 struct i915_page_directory_pointer *pdp,
1367                                 u64 start, u64 length)
1368 {
1369         struct i915_page_directory *pd;
1370         u64 from = start;
1371         unsigned int pdpe;
1372         int ret;
1373
1374         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1375                 if (pd == vm->scratch_pd) {
1376                         pd = alloc_pd(vm);
1377                         if (IS_ERR(pd))
1378                                 goto unwind;
1379
1380                         gen8_initialize_pd(vm, pd);
1381                         gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1382                         pdp->used_pdpes++;
1383                         GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
1384
1385                         mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
1386                 }
1387
1388                 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
1389                 if (unlikely(ret))
1390                         goto unwind_pd;
1391         }
1392
1393         return 0;
1394
1395 unwind_pd:
1396         if (!pd->used_pdes) {
1397                 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1398                 GEM_BUG_ON(!pdp->used_pdpes);
1399                 pdp->used_pdpes--;
1400                 free_pd(vm, pd);
1401         }
1402 unwind:
1403         gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1404         return -ENOMEM;
1405 }
1406
1407 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1408                                  u64 start, u64 length)
1409 {
1410         return gen8_ppgtt_alloc_pdp(vm,
1411                                     &i915_vm_to_ppgtt(vm)->pdp, start, length);
1412 }
1413
1414 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1415                                  u64 start, u64 length)
1416 {
1417         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1418         struct i915_pml4 *pml4 = &ppgtt->pml4;
1419         struct i915_page_directory_pointer *pdp;
1420         u64 from = start;
1421         u32 pml4e;
1422         int ret;
1423
1424         gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1425                 if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1426                         pdp = alloc_pdp(vm);
1427                         if (IS_ERR(pdp))
1428                                 goto unwind;
1429
1430                         gen8_initialize_pdp(vm, pdp);
1431                         gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1432                 }
1433
1434                 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
1435                 if (unlikely(ret))
1436                         goto unwind_pdp;
1437         }
1438
1439         return 0;
1440
1441 unwind_pdp:
1442         if (!pdp->used_pdpes) {
1443                 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1444                 free_pdp(vm, pdp);
1445         }
1446 unwind:
1447         gen8_ppgtt_clear_4lvl(vm, from, start - from);
1448         return -ENOMEM;
1449 }
1450
1451 static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1452                           struct i915_page_directory_pointer *pdp,
1453                           u64 start, u64 length,
1454                           gen8_pte_t scratch_pte,
1455                           struct seq_file *m)
1456 {
1457         struct i915_address_space *vm = &ppgtt->base;
1458         struct i915_page_directory *pd;
1459         u32 pdpe;
1460
1461         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1462                 struct i915_page_table *pt;
1463                 u64 pd_len = length;
1464                 u64 pd_start = start;
1465                 u32 pde;
1466
1467                 if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
1468                         continue;
1469
1470                 seq_printf(m, "\tPDPE #%d\n", pdpe);
1471                 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1472                         u32 pte;
1473                         gen8_pte_t *pt_vaddr;
1474
1475                         if (pd->page_table[pde] == ppgtt->base.scratch_pt)
1476                                 continue;
1477
1478                         pt_vaddr = kmap_atomic_px(pt);
1479                         for (pte = 0; pte < GEN8_PTES; pte += 4) {
1480                                 u64 va = (pdpe << GEN8_PDPE_SHIFT |
1481                                           pde << GEN8_PDE_SHIFT |
1482                                           pte << GEN8_PTE_SHIFT);
1483                                 int i;
1484                                 bool found = false;
1485
1486                                 for (i = 0; i < 4; i++)
1487                                         if (pt_vaddr[pte + i] != scratch_pte)
1488                                                 found = true;
1489                                 if (!found)
1490                                         continue;
1491
1492                                 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1493                                 for (i = 0; i < 4; i++) {
1494                                         if (pt_vaddr[pte + i] != scratch_pte)
1495                                                 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1496                                         else
1497                                                 seq_puts(m, "  SCRATCH ");
1498                                 }
1499                                 seq_puts(m, "\n");
1500                         }
1501                         kunmap_atomic(pt_vaddr);
1502                 }
1503         }
1504 }
1505
1506 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1507 {
1508         struct i915_address_space *vm = &ppgtt->base;
1509         const gen8_pte_t scratch_pte =
1510                 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
1511         u64 start = 0, length = ppgtt->base.total;
1512
1513         if (use_4lvl(vm)) {
1514                 u64 pml4e;
1515                 struct i915_pml4 *pml4 = &ppgtt->pml4;
1516                 struct i915_page_directory_pointer *pdp;
1517
1518                 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1519                         if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
1520                                 continue;
1521
1522                         seq_printf(m, "    PML4E #%llu\n", pml4e);
1523                         gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
1524                 }
1525         } else {
1526                 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
1527         }
1528 }
1529
1530 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
1531 {
1532         struct i915_address_space *vm = &ppgtt->base;
1533         struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1534         struct i915_page_directory *pd;
1535         u64 start = 0, length = ppgtt->base.total;
1536         u64 from = start;
1537         unsigned int pdpe;
1538
1539         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1540                 pd = alloc_pd(vm);
1541                 if (IS_ERR(pd))
1542                         goto unwind;
1543
1544                 gen8_initialize_pd(vm, pd);
1545                 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1546                 pdp->used_pdpes++;
1547         }
1548
1549         pdp->used_pdpes++; /* never remove */
1550         return 0;
1551
1552 unwind:
1553         start -= from;
1554         gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1555                 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1556                 free_pd(vm, pd);
1557         }
1558         pdp->used_pdpes = 0;
1559         return -ENOMEM;
1560 }
1561
1562 /*
1563  * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
1564  * registers, with a net effect resembling a 2-level page table in normal x86
1565  * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
1566  * legacy 32b address space.
1567  *
1568  */
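/*
 * In other words, the 32b case above decomposes as 4 PDPEs, each pointing at a
 * page directory of 512 PDEs, each pointing at a page table of 512 PTEs that
 * map 4096-byte pages: 4 * 512 * 512 * 4096 = 4GB, i.e. 1GB per PDPE.
 */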
1569 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1570 {
1571         struct i915_address_space *vm = &ppgtt->base;
1572         struct drm_i915_private *dev_priv = vm->i915;
1573         int ret;
1574
1575         ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1576                 1ULL << 48 :
1577                 1ULL << 32;
1578
1579         /* There are only a few exceptions for gen >= 6: chv and bxt.
1580          * And we are not sure about the latter, so play safe for now.
1581          */
1582         if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
1583                 ppgtt->base.pt_kmap_wc = true;
1584
1585         ret = gen8_init_scratch(&ppgtt->base);
1586         if (ret) {
1587                 ppgtt->base.total = 0;
1588                 return ret;
1589         }
1590
1591         if (use_4lvl(vm)) {
1592                 ret = setup_px(&ppgtt->base, &ppgtt->pml4);
1593                 if (ret)
1594                         goto free_scratch;
1595
1596                 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1597
1598                 ppgtt->switch_mm = gen8_mm_switch_4lvl;
1599                 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
1600                 ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
1601                 ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
1602         } else {
1603                 ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
1604                 if (ret)
1605                         goto free_scratch;
1606
1607                 if (intel_vgpu_active(dev_priv)) {
1608                         ret = gen8_preallocate_top_level_pdp(ppgtt);
1609                         if (ret) {
1610                                 __pdp_fini(&ppgtt->pdp);
1611                                 goto free_scratch;
1612                         }
1613                 }
1614
1615                 ppgtt->switch_mm = gen8_mm_switch_3lvl;
1616                 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
1617                 ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
1618                 ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
1619         }
1620
1621         if (intel_vgpu_active(dev_priv))
1622                 gen8_ppgtt_notify_vgt(ppgtt, true);
1623
1624         ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1625         ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1626         ppgtt->base.bind_vma = ppgtt_bind_vma;
1627         ppgtt->base.set_pages = ppgtt_set_pages;
1628         ppgtt->base.clear_pages = clear_pages;
1629         ppgtt->debug_dump = gen8_dump_ppgtt;
1630
1631         return 0;
1632
1633 free_scratch:
1634         gen8_free_scratch(&ppgtt->base);
1635         return ret;
1636 }
1637
1638 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1639 {
1640         struct i915_address_space *vm = &ppgtt->base;
1641         struct i915_page_table *unused;
1642         gen6_pte_t scratch_pte;
1643         u32 pd_entry, pte, pde;
1644         u32 start = 0, length = ppgtt->base.total;
1645
1646         scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1647                                      I915_CACHE_LLC, 0);
1648
1649         gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
1650                 u32 expected;
1651                 gen6_pte_t *pt_vaddr;
1652                 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1653                 pd_entry = readl(ppgtt->pd_addr + pde);
1654                 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1655
1656                 if (pd_entry != expected)
1657                         seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1658                                    pde,
1659                                    pd_entry,
1660                                    expected);
1661                 seq_printf(m, "\tPDE: %x\n", pd_entry);
1662
1663                 pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
1664
1665                 for (pte = 0; pte < GEN6_PTES; pte += 4) {
1666                         unsigned long va =
1667                                 (pde * PAGE_SIZE * GEN6_PTES) +
1668                                 (pte * PAGE_SIZE);
1669                         int i;
1670                         bool found = false;
1671                         for (i = 0; i < 4; i++)
1672                                 if (pt_vaddr[pte + i] != scratch_pte)
1673                                         found = true;
1674                         if (!found)
1675                                 continue;
1676
1677                         seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1678                         for (i = 0; i < 4; i++) {
1679                                 if (pt_vaddr[pte + i] != scratch_pte)
1680                                         seq_printf(m, " %08x", pt_vaddr[pte + i]);
1681                                 else
1682                                         seq_puts(m, "  SCRATCH ");
1683                         }
1684                         seq_puts(m, "\n");
1685                 }
1686                 kunmap_atomic(pt_vaddr);
1687         }
1688 }
1689
1690 /* Write pde (index) from the page directory @pd to the page table @pt */
1691 static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
1692                                   const unsigned int pde,
1693                                   const struct i915_page_table *pt)
1694 {
1695         /* Caller needs to make sure the write completes if necessary */
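        /* (e.g. gen6_write_page_range() below follows its PDE updates with wmb()) */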
1696         writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1697                        ppgtt->pd_addr + pde);
1698 }
1699
1700 /* Write all the page tables found in the ppgtt structure into their
1701  * corresponding (incrementing) page directory entries. */
1702 static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
1703                                   u32 start, u32 length)
1704 {
1705         struct i915_page_table *pt;
1706         unsigned int pde;
1707
1708         gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
1709                 gen6_write_pde(ppgtt, pde, pt);
1710
1711         mark_tlbs_dirty(ppgtt);
1712         wmb();
1713 }
1714
1715 static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
1716 {
1717         GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1718         return ppgtt->pd.base.ggtt_offset << 10;
1719 }
1720
1721 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1722                          struct drm_i915_gem_request *req)
1723 {
1724         struct intel_engine_cs *engine = req->engine;
1725         u32 *cs;
1726
1727         /* NB: TLBs must be flushed and invalidated before a switch */
1728         cs = intel_ring_begin(req, 6);
1729         if (IS_ERR(cs))
1730                 return PTR_ERR(cs);
1731
1732         *cs++ = MI_LOAD_REGISTER_IMM(2);
1733         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1734         *cs++ = PP_DIR_DCLV_2G;
1735         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1736         *cs++ = get_pd_offset(ppgtt);
1737         *cs++ = MI_NOOP;
1738         intel_ring_advance(req, cs);
1739
1740         return 0;
1741 }
1742
1743 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1744                           struct drm_i915_gem_request *req)
1745 {
1746         struct intel_engine_cs *engine = req->engine;
1747         u32 *cs;
1748
1749         /* NB: TLBs must be flushed and invalidated before a switch */
1750         cs = intel_ring_begin(req, 6);
1751         if (IS_ERR(cs))
1752                 return PTR_ERR(cs);
1753
1754         *cs++ = MI_LOAD_REGISTER_IMM(2);
1755         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1756         *cs++ = PP_DIR_DCLV_2G;
1757         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1758         *cs++ = get_pd_offset(ppgtt);
1759         *cs++ = MI_NOOP;
1760         intel_ring_advance(req, cs);
1761
1762         return 0;
1763 }
1764
1765 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1766                           struct drm_i915_gem_request *req)
1767 {
1768         struct intel_engine_cs *engine = req->engine;
1769         struct drm_i915_private *dev_priv = req->i915;
1770
1771         I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1772         I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1773         return 0;
1774 }
1775
1776 static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
1777 {
1778         struct intel_engine_cs *engine;
1779         enum intel_engine_id id;
1780
1781         for_each_engine(engine, dev_priv, id) {
1782                 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1783                                  GEN8_GFX_PPGTT_48B : 0;
1784                 I915_WRITE(RING_MODE_GEN7(engine),
1785                            _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1786         }
1787 }
1788
1789 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1790 {
1791         struct intel_engine_cs *engine;
1792         u32 ecochk, ecobits;
1793         enum intel_engine_id id;
1794
1795         ecobits = I915_READ(GAC_ECO_BITS);
1796         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1797
1798         ecochk = I915_READ(GAM_ECOCHK);
1799         if (IS_HASWELL(dev_priv)) {
1800                 ecochk |= ECOCHK_PPGTT_WB_HSW;
1801         } else {
1802                 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1803                 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1804         }
1805         I915_WRITE(GAM_ECOCHK, ecochk);
1806
1807         for_each_engine(engine, dev_priv, id) {
1808                 /* GFX_MODE is per-ring on gen7+ */
1809                 I915_WRITE(RING_MODE_GEN7(engine),
1810                            _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1811         }
1812 }
1813
1814 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1815 {
1816         u32 ecochk, gab_ctl, ecobits;
1817
1818         ecobits = I915_READ(GAC_ECO_BITS);
1819         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1820                    ECOBITS_PPGTT_CACHE64B);
1821
1822         gab_ctl = I915_READ(GAB_CTL);
1823         I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1824
1825         ecochk = I915_READ(GAM_ECOCHK);
1826         I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1827
1828         I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1829 }
1830
1831 /* PPGTT support for Sandybridge/Gen6 and later */
1832 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1833                                    u64 start, u64 length)
1834 {
1835         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1836         unsigned int first_entry = start >> PAGE_SHIFT;
1837         unsigned int pde = first_entry / GEN6_PTES;
1838         unsigned int pte = first_entry % GEN6_PTES;
1839         unsigned int num_entries = length >> PAGE_SHIFT;
1840         gen6_pte_t scratch_pte =
1841                 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
1842
1843         while (num_entries) {
1844                 struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1845                 unsigned int end = min(pte + num_entries, GEN6_PTES);
1846                 gen6_pte_t *vaddr;
1847
1848                 num_entries -= end - pte;
1849
1850                 /* Note that the hw doesn't support removing PDEs on the fly
1851                  * (they are cached inside the context with no means to
1852                  * invalidate the cache), so we can only reset the PTE
1853                  * entries back to scratch.
1854                  */
1855
1856                 vaddr = kmap_atomic_px(pt);
1857                 do {
1858                         vaddr[pte++] = scratch_pte;
1859                 } while (pte < end);
1860                 kunmap_atomic(vaddr);
1861
1862                 pte = 0;
1863         }
1864 }
1865
1866 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1867                                       struct i915_vma *vma,
1868                                       enum i915_cache_level cache_level,
1869                                       u32 flags)
1870 {
1871         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1872         unsigned first_entry = vma->node.start >> PAGE_SHIFT;
1873         unsigned act_pt = first_entry / GEN6_PTES;
1874         unsigned act_pte = first_entry % GEN6_PTES;
1875         const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1876         struct sgt_dma iter = sgt_dma(vma);
1877         gen6_pte_t *vaddr;
1878
1879         vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
1880         do {
1881                 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1882
1883                 iter.dma += PAGE_SIZE;
1884                 if (iter.dma == iter.max) {
1885                         iter.sg = __sg_next(iter.sg);
1886                         if (!iter.sg)
1887                                 break;
1888
1889                         iter.dma = sg_dma_address(iter.sg);
1890                         iter.max = iter.dma + iter.sg->length;
1891                 }
1892
1893                 if (++act_pte == GEN6_PTES) {
1894                         kunmap_atomic(vaddr);
1895                         vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
1896                         act_pte = 0;
1897                 }
1898         } while (1);
1899         kunmap_atomic(vaddr);
1900
1901         vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1902 }
1903
1904 static int gen6_alloc_va_range(struct i915_address_space *vm,
1905                                u64 start, u64 length)
1906 {
1907         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1908         struct i915_page_table *pt;
1909         u64 from = start;
1910         unsigned int pde;
1911         bool flush = false;
1912
1913         gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
1914                 if (pt == vm->scratch_pt) {
1915                         pt = alloc_pt(vm);
1916                         if (IS_ERR(pt))
1917                                 goto unwind_out;
1918
1919                         gen6_initialize_pt(vm, pt);
1920                         ppgtt->pd.page_table[pde] = pt;
1921                         gen6_write_pde(ppgtt, pde, pt);
1922                         flush = true;
1923                 }
1924         }
1925
1926         if (flush) {
1927                 mark_tlbs_dirty(ppgtt);
1928                 wmb();
1929         }
1930
1931         return 0;
1932
1933 unwind_out:
1934         gen6_ppgtt_clear_range(vm, from, start);
1935         return -ENOMEM;
1936 }
1937
1938 static int gen6_init_scratch(struct i915_address_space *vm)
1939 {
1940         int ret;
1941
1942         ret = setup_scratch_page(vm, I915_GFP_DMA);
1943         if (ret)
1944                 return ret;
1945
1946         vm->scratch_pt = alloc_pt(vm);
1947         if (IS_ERR(vm->scratch_pt)) {
1948                 cleanup_scratch_page(vm);
1949                 return PTR_ERR(vm->scratch_pt);
1950         }
1951
1952         gen6_initialize_pt(vm, vm->scratch_pt);
1953
1954         return 0;
1955 }
1956
1957 static void gen6_free_scratch(struct i915_address_space *vm)
1958 {
1959         free_pt(vm, vm->scratch_pt);
1960         cleanup_scratch_page(vm);
1961 }
1962
1963 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1964 {
1965         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1966         struct i915_page_directory *pd = &ppgtt->pd;
1967         struct i915_page_table *pt;
1968         u32 pde;
1969
1970         drm_mm_remove_node(&ppgtt->node);
1971
1972         gen6_for_all_pdes(pt, pd, pde)
1973                 if (pt != vm->scratch_pt)
1974                         free_pt(vm, pt);
1975
1976         gen6_free_scratch(vm);
1977 }
1978
1979 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
1980 {
1981         struct i915_address_space *vm = &ppgtt->base;
1982         struct drm_i915_private *dev_priv = ppgtt->base.i915;
1983         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1984         int ret;
1985
1986         /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1987          * allocator works in address space sizes, so it's multiplied by page
1988          * size. We allocate at the top of the GTT to avoid fragmentation.
1989          */
1990         BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
1991
1992         ret = gen6_init_scratch(vm);
1993         if (ret)
1994                 return ret;
1995
1996         ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1997                                   GEN6_PD_SIZE, GEN6_PD_ALIGN,
1998                                   I915_COLOR_UNEVICTABLE,
1999                                   0, ggtt->base.total,
2000                                   PIN_HIGH);
2001         if (ret)
2002                 goto err_out;
2003
2004         if (ppgtt->node.start < ggtt->mappable_end)
2005                 DRM_DEBUG("Forced to use aperture for PDEs\n");
2006
2007         ppgtt->pd.base.ggtt_offset =
2008                 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
2009
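        /*
         * pd_addr ends up pointing at the GGTT PTE slot that covers node.start,
         * i.e. the location inside the gsm mapping through which the 512 PDEs
         * are written (see gen6_write_pde()).
         */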
2010         ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
2011                 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
2012
2013         return 0;
2014
2015 err_out:
2016         gen6_free_scratch(vm);
2017         return ret;
2018 }
2019
2020 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2021 {
2022         return gen6_ppgtt_allocate_page_directories(ppgtt);
2023 }
2024
2025 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2026                                   u64 start, u64 length)
2027 {
2028         struct i915_page_table *unused;
2029         u32 pde;
2030
2031         gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
2032                 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
2033 }
2034
2035 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2036 {
2037         struct drm_i915_private *dev_priv = ppgtt->base.i915;
2038         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2039         int ret;
2040
2041         ppgtt->base.pte_encode = ggtt->base.pte_encode;
2042         if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
2043                 ppgtt->switch_mm = gen6_mm_switch;
2044         else if (IS_HASWELL(dev_priv))
2045                 ppgtt->switch_mm = hsw_mm_switch;
2046         else if (IS_GEN7(dev_priv))
2047                 ppgtt->switch_mm = gen7_mm_switch;
2048         else
2049                 BUG();
2050
2051         ret = gen6_ppgtt_alloc(ppgtt);
2052         if (ret)
2053                 return ret;
2054
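        /* i.e. 512 PDEs * 1024 PTEs * 4096-byte pages = 2GB of PPGTT address space */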
2055         ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
2056
2057         gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
2058         gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
2059
2060         ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
2061         if (ret) {
2062                 gen6_ppgtt_cleanup(&ppgtt->base);
2063                 return ret;
2064         }
2065
2066         ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2067         ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
2068         ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2069         ppgtt->base.bind_vma = ppgtt_bind_vma;
2070         ppgtt->base.set_pages = ppgtt_set_pages;
2071         ppgtt->base.clear_pages = clear_pages;
2072         ppgtt->base.cleanup = gen6_ppgtt_cleanup;
2073         ppgtt->debug_dump = gen6_dump_ppgtt;
2074
2075         DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
2076                          ppgtt->node.size >> 20,
2077                          ppgtt->node.start / PAGE_SIZE);
2078
2079         DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
2080                          ppgtt->pd.base.ggtt_offset << 10);
2081
2082         return 0;
2083 }
2084
2085 static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
2086                            struct drm_i915_private *dev_priv)
2087 {
2088         ppgtt->base.i915 = dev_priv;
2089         ppgtt->base.dma = &dev_priv->drm.pdev->dev;
2090
2091         if (INTEL_INFO(dev_priv)->gen < 8)
2092                 return gen6_ppgtt_init(ppgtt);
2093         else
2094                 return gen8_ppgtt_init(ppgtt);
2095 }
2096
2097 static void i915_address_space_init(struct i915_address_space *vm,
2098                                     struct drm_i915_private *dev_priv,
2099                                     const char *name)
2100 {
2101         i915_gem_timeline_init(dev_priv, &vm->timeline, name);
2102
2103         drm_mm_init(&vm->mm, 0, vm->total);
2104         vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
2105
2106         INIT_LIST_HEAD(&vm->active_list);
2107         INIT_LIST_HEAD(&vm->inactive_list);
2108         INIT_LIST_HEAD(&vm->unbound_list);
2109
2110         list_add_tail(&vm->global_link, &dev_priv->vm_list);
2111         pagevec_init(&vm->free_pages, false);
2112 }
2113
2114 static void i915_address_space_fini(struct i915_address_space *vm)
2115 {
2116         if (pagevec_count(&vm->free_pages))
2117                 vm_free_pages_release(vm, true);
2118
2119         i915_gem_timeline_fini(&vm->timeline);
2120         drm_mm_takedown(&vm->mm);
2121         list_del(&vm->global_link);
2122 }
2123
2124 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
2125 {
2126         /* This function is for GTT-related workarounds. It is called on
2127          * driver load and after a GPU reset, so you can place workarounds
2128          * here even if they get overwritten by a GPU reset.
2129          */
2130         /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
2131         if (IS_BROADWELL(dev_priv))
2132                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2133         else if (IS_CHERRYVIEW(dev_priv))
2134                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2135         else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
2136                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2137         else if (IS_GEN9_LP(dev_priv))
2138                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2139
2140         /*
2141          * To support 64K PTEs we need to first enable the use of the
2142          * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
2143          * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2144          * shouldn't be needed after GEN10.
2145          *
2146          * 64K pages were first introduced with BDW, although technically they
2147          * only *work* from gen9+. For pre-BDW we instead have the option of
2148          * 32K pages, but we don't currently have any support for them in our
2149          * driver.
2150          */
2151         if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2152             INTEL_GEN(dev_priv) <= 10)
2153                 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2154                            I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2155                            GAMW_ECO_ENABLE_64K_IPS_FIELD);
2156 }
2157
2158 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2159 {
2160         gtt_write_workarounds(dev_priv);
2161
2162         /* In the case of execlists, PPGTT is enabled by the context descriptor
2163          * and the PDPs are contained within the context itself.  We don't
2164          * need to do anything here. */
2165         if (i915_modparams.enable_execlists)
2166                 return 0;
2167
2168         if (!USES_PPGTT(dev_priv))
2169                 return 0;
2170
2171         if (IS_GEN6(dev_priv))
2172                 gen6_ppgtt_enable(dev_priv);
2173         else if (IS_GEN7(dev_priv))
2174                 gen7_ppgtt_enable(dev_priv);
2175         else if (INTEL_GEN(dev_priv) >= 8)
2176                 gen8_ppgtt_enable(dev_priv);
2177         else
2178                 MISSING_CASE(INTEL_GEN(dev_priv));
2179
2180         return 0;
2181 }
2182
2183 struct i915_hw_ppgtt *
2184 i915_ppgtt_create(struct drm_i915_private *dev_priv,
2185                   struct drm_i915_file_private *fpriv,
2186                   const char *name)
2187 {
2188         struct i915_hw_ppgtt *ppgtt;
2189         int ret;
2190
2191         ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2192         if (!ppgtt)
2193                 return ERR_PTR(-ENOMEM);
2194
2195         ret = __hw_ppgtt_init(ppgtt, dev_priv);
2196         if (ret) {
2197                 kfree(ppgtt);
2198                 return ERR_PTR(ret);
2199         }
2200
2201         kref_init(&ppgtt->ref);
2202         i915_address_space_init(&ppgtt->base, dev_priv, name);
2203         ppgtt->base.file = fpriv;
2204
2205         trace_i915_ppgtt_create(&ppgtt->base);
2206
2207         return ppgtt;
2208 }
2209
2210 void i915_ppgtt_close(struct i915_address_space *vm)
2211 {
2212         struct list_head *phases[] = {
2213                 &vm->active_list,
2214                 &vm->inactive_list,
2215                 &vm->unbound_list,
2216                 NULL,
2217         }, **phase;
2218
2219         GEM_BUG_ON(vm->closed);
2220         vm->closed = true;
2221
2222         for (phase = phases; *phase; phase++) {
2223                 struct i915_vma *vma, *vn;
2224
2225                 list_for_each_entry_safe(vma, vn, *phase, vm_link)
2226                         if (!i915_vma_is_closed(vma))
2227                                 i915_vma_close(vma);
2228         }
2229 }
2230
2231 void i915_ppgtt_release(struct kref *kref)
2232 {
2233         struct i915_hw_ppgtt *ppgtt =
2234                 container_of(kref, struct i915_hw_ppgtt, ref);
2235
2236         trace_i915_ppgtt_release(&ppgtt->base);
2237
2238         /* vmas should already be unbound and destroyed */
2239         WARN_ON(!list_empty(&ppgtt->base.active_list));
2240         WARN_ON(!list_empty(&ppgtt->base.inactive_list));
2241         WARN_ON(!list_empty(&ppgtt->base.unbound_list));
2242
2243         ppgtt->base.cleanup(&ppgtt->base);
2244         i915_address_space_fini(&ppgtt->base);
2245         kfree(ppgtt);
2246 }
2247
2248 /* Certain Gen5 chipsets require idling the GPU before
2249  * unmapping anything from the GTT when VT-d is enabled.
2250  */
2251 static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2252 {
2253         /* Query intel_iommu to see if we need the workaround. Presumably
2254          * intel_iommu was loaded first.
2255          */
2256         return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
2257 }
2258
2259 static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
2260 {
2261         struct intel_engine_cs *engine;
2262         enum intel_engine_id id;
2263         u32 fault;
2264
2265         for_each_engine(engine, dev_priv, id) {
2266                 fault = I915_READ(RING_FAULT_REG(engine));
2267                 if (fault & RING_FAULT_VALID) {
2268                         DRM_DEBUG_DRIVER("Unexpected fault\n"
2269                                          "\tAddr: 0x%08lx\n"
2270                                          "\tAddress space: %s\n"
2271                                          "\tSource ID: %d\n"
2272                                          "\tType: %d\n",
2273                                          fault & PAGE_MASK,
2274                                          fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2275                                          RING_FAULT_SRCID(fault),
2276                                          RING_FAULT_FAULT_TYPE(fault));
2277                         I915_WRITE(RING_FAULT_REG(engine),
2278                                    fault & ~RING_FAULT_VALID);
2279                 }
2280         }
2281
2282         POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
2283 }
2284
2285 static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
2286 {
2287         u32 fault = I915_READ(GEN8_RING_FAULT_REG);
2288
2289         if (fault & RING_FAULT_VALID) {
2290                 DRM_DEBUG_DRIVER("Unexpected fault\n"
2291                                  "\tAddr: 0x%08lx\n"
2292                                  "\tEngine ID: %d\n"
2293                                  "\tSource ID: %d\n"
2294                                  "\tType: %d\n",
2295                                  fault & PAGE_MASK,
2296                                  GEN8_RING_FAULT_ENGINE_ID(fault),
2297                                  RING_FAULT_SRCID(fault),
2298                                  RING_FAULT_FAULT_TYPE(fault));
2299                 I915_WRITE(GEN8_RING_FAULT_REG,
2300                            fault & ~RING_FAULT_VALID);
2301         }
2302
2303         POSTING_READ(GEN8_RING_FAULT_REG);
2304 }
2305
2306 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2307 {
2308         /* From GEN8 onwards we only have one 'All Engine Fault Register' */
2309         if (INTEL_GEN(dev_priv) >= 8)
2310                 gen8_check_and_clear_faults(dev_priv);
2311         else if (INTEL_GEN(dev_priv) >= 6)
2312                 gen6_check_and_clear_faults(dev_priv);
2313         else
2314                 return;
2315 }
2316
2317 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
2318 {
2319         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2320
2321         /* Don't bother messing with faults pre GEN6 as we have little
2322          * documentation supporting that it's a good idea.
2323          */
2324         if (INTEL_GEN(dev_priv) < 6)
2325                 return;
2326
2327         i915_check_and_clear_faults(dev_priv);
2328
2329         ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
2330
2331         i915_ggtt_invalidate(dev_priv);
2332 }
2333
2334 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2335                                struct sg_table *pages)
2336 {
2337         do {
2338                 if (dma_map_sg(&obj->base.dev->pdev->dev,
2339                                pages->sgl, pages->nents,
2340                                PCI_DMA_BIDIRECTIONAL))
2341                         return 0;
2342
2343                 /* If the DMA remap fails, one cause can be that we have
2344                  * too many objects pinned in a small remapping table,
2345                  * such as swiotlb. Incrementally purge all other objects and
2346                  * try again - if there are no more pages to remove from
2347                  * the DMA remapper, i915_gem_shrink will return 0.
2348                  */
2349                 GEM_BUG_ON(obj->mm.pages == pages);
2350         } while (i915_gem_shrink(to_i915(obj->base.dev),
2351                                  obj->base.size >> PAGE_SHIFT, NULL,
2352                                  I915_SHRINK_BOUND |
2353                                  I915_SHRINK_UNBOUND |
2354                                  I915_SHRINK_ACTIVE));
2355
2356         return -ENOSPC;
2357 }
2358
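/* A gen8 GGTT PTE is a 64-bit value, so a single writeq() updates one entry. */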
2359 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2360 {
2361         writeq(pte, addr);
2362 }
2363
2364 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2365                                   dma_addr_t addr,
2366                                   u64 offset,
2367                                   enum i915_cache_level level,
2368                                   u32 unused)
2369 {
2370         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2371         gen8_pte_t __iomem *pte =
2372                 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
2373
2374         gen8_set_pte(pte, gen8_pte_encode(addr, level));
2375
2376         ggtt->invalidate(vm->i915);
2377 }
2378
2379 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2380                                      struct i915_vma *vma,
2381                                      enum i915_cache_level level,
2382                                      u32 unused)
2383 {
2384         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2385         struct sgt_iter sgt_iter;
2386         gen8_pte_t __iomem *gtt_entries;
2387         const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
2388         dma_addr_t addr;
2389
2390         gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2391         gtt_entries += vma->node.start >> PAGE_SHIFT;
2392         for_each_sgt_dma(addr, sgt_iter, vma->pages)
2393                 gen8_set_pte(gtt_entries++, pte_encode | addr);
2394
2395         wmb();
2396
2397         /* This next bit makes the above posting read even more important. We
2398          * want to flush the TLBs only after we're certain all the PTE updates
2399          * have finished.
2400          */
2401         ggtt->invalidate(vm->i915);
2402 }
2403
2404 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2405                                   dma_addr_t addr,
2406                                   u64 offset,
2407                                   enum i915_cache_level level,
2408                                   u32 flags)
2409 {
2410         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2411         gen6_pte_t __iomem *pte =
2412                 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
2413
2414         iowrite32(vm->pte_encode(addr, level, flags), pte);
2415
2416         ggtt->invalidate(vm->i915);
2417 }
2418
2419 /*
2420  * Binds an object into the global gtt with the specified cache level. The object
2421  * will be accessible to the GPU via commands whose operands reference offsets
2422  * within the global GTT as well as accessible by the CPU through the GMADR
2423  * mapped BAR (dev_priv->mm.gtt->gtt).
2424  */
2425 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2426                                      struct i915_vma *vma,
2427                                      enum i915_cache_level level,
2428                                      u32 flags)
2429 {
2430         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2431         gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2432         unsigned int i = vma->node.start >> PAGE_SHIFT;
2433         struct sgt_iter iter;
2434         dma_addr_t addr;
2435         for_each_sgt_dma(addr, iter, vma->pages)
2436                 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2437         wmb();
2438
2439         /* This next bit makes the above posting read even more important. We
2440          * want to flush the TLBs only after we're certain all the PTE updates
2441          * have finished.
2442          */
2443         ggtt->invalidate(vm->i915);
2444 }
2445
2446 static void nop_clear_range(struct i915_address_space *vm,
2447                             u64 start, u64 length)
2448 {
2449 }
2450
2451 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2452                                   u64 start, u64 length)
2453 {
2454         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2455         unsigned first_entry = start >> PAGE_SHIFT;
2456         unsigned num_entries = length >> PAGE_SHIFT;
2457         const gen8_pte_t scratch_pte =
2458                 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2459         gen8_pte_t __iomem *gtt_base =
2460                 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2461         const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2462         int i;
2463
2464         if (WARN(num_entries > max_entries,
2465                  "First entry = %d; Num entries = %d (max=%d)\n",
2466                  first_entry, num_entries, max_entries))
2467                 num_entries = max_entries;
2468
2469         for (i = 0; i < num_entries; i++)
2470                 gen8_set_pte(&gtt_base[i], scratch_pte);
2471 }
2472
2473 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2474 {
2475         struct drm_i915_private *dev_priv = vm->i915;
2476
2477         /*
2478          * Make sure the internal GAM fifo has been cleared of all GTT
2479          * writes before exiting stop_machine(). This guarantees that
2480          * any aperture accesses waiting to start in another process
2481          * cannot back up behind the GTT writes causing a hang.
2482          * The register can be any arbitrary GAM register.
2483          */
2484         POSTING_READ(GFX_FLSH_CNTL_GEN6);
2485 }
2486
2487 struct insert_page {
2488         struct i915_address_space *vm;
2489         dma_addr_t addr;
2490         u64 offset;
2491         enum i915_cache_level level;
2492 };
2493
2494 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2495 {
2496         struct insert_page *arg = _arg;
2497
2498         gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2499         bxt_vtd_ggtt_wa(arg->vm);
2500
2501         return 0;
2502 }
2503
2504 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2505                                           dma_addr_t addr,
2506                                           u64 offset,
2507                                           enum i915_cache_level level,
2508                                           u32 unused)
2509 {
2510         struct insert_page arg = { vm, addr, offset, level };
2511
2512         stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2513 }
2514
2515 struct insert_entries {
2516         struct i915_address_space *vm;
2517         struct i915_vma *vma;
2518         enum i915_cache_level level;
2519 };
2520
2521 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2522 {
2523         struct insert_entries *arg = _arg;
2524
2525         gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
2526         bxt_vtd_ggtt_wa(arg->vm);
2527
2528         return 0;
2529 }
2530
2531 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2532                                              struct i915_vma *vma,
2533                                              enum i915_cache_level level,
2534                                              u32 unused)
2535 {
2536         struct insert_entries arg = { vm, vma, level };
2537
2538         stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2539 }
2540
2541 struct clear_range {
2542         struct i915_address_space *vm;
2543         u64 start;
2544         u64 length;
2545 };
2546
2547 static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2548 {
2549         struct clear_range *arg = _arg;
2550
2551         gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2552         bxt_vtd_ggtt_wa(arg->vm);
2553
2554         return 0;
2555 }
2556
2557 static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2558                                           u64 start,
2559                                           u64 length)
2560 {
2561         struct clear_range arg = { vm, start, length };
2562
2563         stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2564 }
2565
2566 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2567                                   u64 start, u64 length)
2568 {
2569         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2570         unsigned first_entry = start >> PAGE_SHIFT;
2571         unsigned num_entries = length >> PAGE_SHIFT;
2572         gen6_pte_t scratch_pte, __iomem *gtt_base =
2573                 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2574         const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2575         int i;
2576
2577         if (WARN(num_entries > max_entries,
2578                  "First entry = %d; Num entries = %d (max=%d)\n",
2579                  first_entry, num_entries, max_entries))
2580                 num_entries = max_entries;
2581
2582         scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
2583                                      I915_CACHE_LLC, 0);
2584
2585         for (i = 0; i < num_entries; i++)
2586                 iowrite32(scratch_pte, &gtt_base[i]);
2587 }
2588
2589 static void i915_ggtt_insert_page(struct i915_address_space *vm,
2590                                   dma_addr_t addr,
2591                                   u64 offset,
2592                                   enum i915_cache_level cache_level,
2593                                   u32 unused)
2594 {
2595         unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2596                 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2597
2598         intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2599 }
2600
2601 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2602                                      struct i915_vma *vma,
2603                                      enum i915_cache_level cache_level,
2604                                      u32 unused)
2605 {
2606         unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2607                 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2608
2609         intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2610                                     flags);
2611 }
2612
2613 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2614                                   u64 start, u64 length)
2615 {
2616         intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2617 }
2618
2619 static int ggtt_bind_vma(struct i915_vma *vma,
2620                          enum i915_cache_level cache_level,
2621                          u32 flags)
2622 {
2623         struct drm_i915_private *i915 = vma->vm->i915;
2624         struct drm_i915_gem_object *obj = vma->obj;
2625         u32 pte_flags;
2626
2627         /* Currently applicable only to VLV */
2628         pte_flags = 0;
2629         if (obj->gt_ro)
2630                 pte_flags |= PTE_READ_ONLY;
2631
2632         intel_runtime_pm_get(i915);
2633         vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2634         intel_runtime_pm_put(i915);
2635
2636         vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2637
2638         /*
2639          * Without aliasing PPGTT there's no difference between
2640          * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2641          * upgrade to both bound if we bind either to avoid double-binding.
2642          */
2643         vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2644
2645         return 0;
2646 }
2647
2648 static void ggtt_unbind_vma(struct i915_vma *vma)
2649 {
2650         struct drm_i915_private *i915 = vma->vm->i915;
2651
2652         intel_runtime_pm_get(i915);
2653         vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2654         intel_runtime_pm_put(i915);
2655 }
2656
2657 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2658                                  enum i915_cache_level cache_level,
2659                                  u32 flags)
2660 {
2661         struct drm_i915_private *i915 = vma->vm->i915;
2662         u32 pte_flags;
2663         int ret;
2664
2665         /* Currently applicable only to VLV */
2666         pte_flags = 0;
2667         if (vma->obj->gt_ro)
2668                 pte_flags |= PTE_READ_ONLY;
2669
2670         if (flags & I915_VMA_LOCAL_BIND) {
2671                 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2672
2673                 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2674                     appgtt->base.allocate_va_range) {
2675                         ret = appgtt->base.allocate_va_range(&appgtt->base,
2676                                                              vma->node.start,
2677                                                              vma->size);
2678                         if (ret)
2679                                 return ret;
2680                 }
2681
2682                 appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
2683                                             pte_flags);
2684         }
2685
2686         if (flags & I915_VMA_GLOBAL_BIND) {
2687                 intel_runtime_pm_get(i915);
2688                 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2689                 intel_runtime_pm_put(i915);
2690         }
2691
2692         return 0;
2693 }
2694
2695 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2696 {
2697         struct drm_i915_private *i915 = vma->vm->i915;
2698
2699         if (vma->flags & I915_VMA_GLOBAL_BIND) {
2700                 intel_runtime_pm_get(i915);
2701                 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2702                 intel_runtime_pm_put(i915);
2703         }
2704
2705         if (vma->flags & I915_VMA_LOCAL_BIND) {
2706                 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2707
2708                 vm->clear_range(vm, vma->node.start, vma->size);
2709         }
2710 }
2711
2712 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2713                                struct sg_table *pages)
2714 {
2715         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2716         struct device *kdev = &dev_priv->drm.pdev->dev;
2717         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2718
2719         if (unlikely(ggtt->do_idle_maps)) {
2720                 if (i915_gem_wait_for_idle(dev_priv, 0)) {
2721                         DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2722                         /* Wait a bit, in hopes it avoids the hang */
2723                         udelay(10);
2724                 }
2725         }
2726
2727         dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2728 }
2729
2730 static int ggtt_set_pages(struct i915_vma *vma)
2731 {
2732         int ret;
2733
2734         GEM_BUG_ON(vma->pages);
2735
2736         ret = i915_get_ggtt_vma_pages(vma);
2737         if (ret)
2738                 return ret;
2739
2740         vma->page_sizes = vma->obj->mm.page_sizes;
2741
2742         return 0;
2743 }
2744
2745 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2746                                   unsigned long color,
2747                                   u64 *start,
2748                                   u64 *end)
2749 {
2750         if (node->allocated && node->color != color)
2751                 *start += I915_GTT_PAGE_SIZE;
2752
2753         /* Also leave a space between the unallocated reserved node after the
2754          * GTT and any objects within the GTT, i.e. we use the color adjustment
2755          * to insert a guard page to prevent prefetches crossing over the
2756          * GTT boundary.
2757          */
2758         node = list_next_entry(node, node_list);
2759         if (node->color != color)
2760                 *end -= I915_GTT_PAGE_SIZE;
2761 }
2762
2763 int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2764 {
2765         struct i915_ggtt *ggtt = &i915->ggtt;
2766         struct i915_hw_ppgtt *ppgtt;
2767         int err;
2768
2769         ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
2770         if (IS_ERR(ppgtt))
2771                 return PTR_ERR(ppgtt);
2772
2773         if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2774                 err = -ENODEV;
2775                 goto err_ppgtt;
2776         }
2777
2778         if (ppgtt->base.allocate_va_range) {
2779                 /* Note we only pre-allocate as far as the end of the global
2780                  * GTT. On 48b / 4-level page-tables, the difference is very,
2781                  * very significant! We have to preallocate as GVT/vgpu does
2782                  * not like the page directory disappearing.
2783                  */
2784                 err = ppgtt->base.allocate_va_range(&ppgtt->base,
2785                                                     0, ggtt->base.total);
2786                 if (err)
2787                         goto err_ppgtt;
2788         }
2789
2790         i915->mm.aliasing_ppgtt = ppgtt;
2791
2792         WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2793         ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2794
2795         WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2796         ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2797
2798         return 0;
2799
2800 err_ppgtt:
2801         i915_ppgtt_put(ppgtt);
2802         return err;
2803 }
2804
2805 void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2806 {
2807         struct i915_ggtt *ggtt = &i915->ggtt;
2808         struct i915_hw_ppgtt *ppgtt;
2809
2810         ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2811         if (!ppgtt)
2812                 return;
2813
2814         i915_ppgtt_put(ppgtt);
2815
2816         ggtt->base.bind_vma = ggtt_bind_vma;
2817         ggtt->base.unbind_vma = ggtt_unbind_vma;
2818 }
2819
2820 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2821 {
2822         /* Let GEM Manage all of the aperture.
2823          *
2824          * However, leave one page at the end still bound to the scratch page.
2825          * There are a number of places where the hardware apparently prefetches
2826          * past the end of the object, and we've seen multiple hangs with the
2827          * GPU head pointer stuck in a batchbuffer bound at the last page of the
2828          * aperture.  One page should be enough to keep any prefetching inside
2829          * of the aperture.
2830          */
2831         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2832         unsigned long hole_start, hole_end;
2833         struct drm_mm_node *entry;
2834         int ret;
2835
2836         ret = intel_vgt_balloon(dev_priv);
2837         if (ret)
2838                 return ret;
2839
2840         /* Reserve a mappable slot for our lockless error capture */
2841         ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2842                                           PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2843                                           0, ggtt->mappable_end,
2844                                           DRM_MM_INSERT_LOW);
2845         if (ret)
2846                 return ret;
2847
2848         /* Clear any non-preallocated blocks */
2849         drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
2850                 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2851                               hole_start, hole_end);
2852                 ggtt->base.clear_range(&ggtt->base, hole_start,
2853                                        hole_end - hole_start);
2854         }
2855
2856         /* And finally clear the reserved guard page */
2857         ggtt->base.clear_range(&ggtt->base,
2858                                ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
2859
2860         if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
2861                 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2862                 if (ret)
2863                         goto err;
2864         }
2865
2866         return 0;
2867
2868 err:
2869         drm_mm_remove_node(&ggtt->error_capture);
2870         return ret;
2871 }
2872
2873 /**
2874  * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2875  * @dev_priv: i915 device
2876  */
2877 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2878 {
2879         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2880         struct i915_vma *vma, *vn;
2881         struct pagevec *pvec;
2882
2883         ggtt->base.closed = true;
2884
2885         mutex_lock(&dev_priv->drm.struct_mutex);
2886         WARN_ON(!list_empty(&ggtt->base.active_list));
2887         list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2888                 WARN_ON(i915_vma_unbind(vma));
2889         mutex_unlock(&dev_priv->drm.struct_mutex);
2890
2891         i915_gem_cleanup_stolen(&dev_priv->drm);
2892
2893         mutex_lock(&dev_priv->drm.struct_mutex);
2894         i915_gem_fini_aliasing_ppgtt(dev_priv);
2895
2896         if (drm_mm_node_allocated(&ggtt->error_capture))
2897                 drm_mm_remove_node(&ggtt->error_capture);
2898
2899         if (drm_mm_initialized(&ggtt->base.mm)) {
2900                 intel_vgt_deballoon(dev_priv);
2901                 i915_address_space_fini(&ggtt->base);
2902         }
2903
2904         ggtt->base.cleanup(&ggtt->base);
2905
2906         pvec = &dev_priv->mm.wc_stash;
2907         if (pvec->nr) {
2908                 set_pages_array_wb(pvec->pages, pvec->nr);
2909                 __pagevec_release(pvec);
2910         }
2911
2912         mutex_unlock(&dev_priv->drm.struct_mutex);
2913
2914         arch_phys_wc_del(ggtt->mtrr);
2915         io_mapping_fini(&ggtt->mappable);
2916 }
2917
2918 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2919 {
2920         snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2921         snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2922         return snb_gmch_ctl << 20;
2923 }
2924
2925 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2926 {
2927         bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2928         bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2929         if (bdw_gmch_ctl)
2930                 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2931
2932 #ifdef CONFIG_X86_32
2933         /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
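        /* i.e. (4 << 20) bytes of PTEs / 8 bytes per gen8 PTE * 4096-byte pages = 2GB */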
2934         if (bdw_gmch_ctl > 4)
2935                 bdw_gmch_ctl = 4;
2936 #endif
2937
2938         return bdw_gmch_ctl << 20;
2939 }
2940
2941 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2942 {
2943         gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2944         gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2945
2946         if (gmch_ctrl)
2947                 return 1 << (20 + gmch_ctrl);
2948
2949         return 0;
2950 }
2951
2952 static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
2953 {
2954         snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2955         snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2956         return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
2957 }
2958
2959 static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
2960 {
2961         bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2962         bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2963         return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
2964 }
2965
2966 static size_t chv_get_stolen_size(u16 gmch_ctrl)
2967 {
2968         gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2969         gmch_ctrl &= SNB_GMCH_GMS_MASK;
2970
2971         /*
2972          * 0x0  to 0x10: 32MB increments starting at 0MB
2973          * 0x11 to 0x16: 4MB increments starting at 8MB
2974          * 0x17 to 0x1d: 4MB increments starting at 36MB
2975          */
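        /* e.g. 0x10 -> 0x10 << 25 = 512MB, 0x11 -> 2 << 22 = 8MB, 0x17 -> 9 << 22 = 36MB */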
2976         if (gmch_ctrl < 0x11)
2977                 return (size_t)gmch_ctrl << 25;
2978         else if (gmch_ctrl < 0x17)
2979                 return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
2980         else
2981                 return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
2982 }
2983
2984 static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2985 {
2986         gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2987         gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2988
2989         if (gen9_gmch_ctl < 0xf0)
2990                 return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
2991         else
2992                 /* 4MB increments, with 0xf0 decoding to 4MB */
2993                 return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
2994 }
2995
2996 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2997 {
2998         struct drm_i915_private *dev_priv = ggtt->base.i915;
2999         struct pci_dev *pdev = dev_priv->drm.pdev;
3000         phys_addr_t phys_addr;
3001         int ret;
3002
3003         /* For Modern GENs the PTEs and register space are split in the BAR */
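        /* (the page-table half occupies the upper half of BAR 0, hence start + len / 2) */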
3004         phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
3005
3006         /*
3007          * On BXT+/CNL+, writes larger than 64 bits to the GTT page-table range
3008          * are dropped. WC mappings in general produce 64-byte burst writes when
3009          * the WC buffer is flushed, so we cannot use WC here and have to fall
3010          * back to an uncached mapping. The WC issue is easily caught by the
3011          * readback check when writing GTT PTE entries.
3012          */
3013         if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
3014                 ggtt->gsm = ioremap_nocache(phys_addr, size);
3015         else
3016                 ggtt->gsm = ioremap_wc(phys_addr, size);
3017         if (!ggtt->gsm) {
3018                 DRM_ERROR("Failed to map the ggtt page table\n");
3019                 return -ENOMEM;
3020         }
3021
3022         ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
3023         if (ret) {
3024                 DRM_ERROR("Scratch setup failed\n");
3025                 /* iounmap will also get called at remove, but meh */
3026                 iounmap(ggtt->gsm);
3027                 return ret;
3028         }
3029
3030         return 0;
3031 }
3032
3033 static struct intel_ppat_entry *
3034 __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
3035 {
3036         struct intel_ppat_entry *entry = &ppat->entries[index];
3037
3038         GEM_BUG_ON(index >= ppat->max_entries);
3039         GEM_BUG_ON(test_bit(index, ppat->used));
3040
3041         entry->ppat = ppat;
3042         entry->value = value;
3043         kref_init(&entry->ref);
3044         set_bit(index, ppat->used);
3045         set_bit(index, ppat->dirty);
3046
3047         return entry;
3048 }
3049
3050 static void __free_ppat_entry(struct intel_ppat_entry *entry)
3051 {
3052         struct intel_ppat *ppat = entry->ppat;
3053         unsigned int index = entry - ppat->entries;
3054
3055         GEM_BUG_ON(index >= ppat->max_entries);
3056         GEM_BUG_ON(!test_bit(index, ppat->used));
3057
3058         entry->value = ppat->clear_value;
3059         clear_bit(index, ppat->used);
3060         set_bit(index, ppat->dirty);
3061 }
3062
3063 /**
3064  * intel_ppat_get - get a usable PPAT entry
3065  * @i915: i915 device instance
3066  * @value: the PPAT value required by the caller
3067  *
3068  * The function searches for an existing PPAT entry that matches the required
3069  * value. If a perfect match is found, that entry is reused. If only a partial
3070  * match is found, the function checks whether a free PPAT index is available:
3071  * if so, it allocates a new PPAT entry for the required value and updates the
3072  * hardware; if not, the best partially matching entry is used instead. Returns
3073  * an ERR_PTR() on failure, e.g. -ENOSPC when no entry matches at all.
3074  */
3075 const struct intel_ppat_entry *
3076 intel_ppat_get(struct drm_i915_private *i915, u8 value)
3077 {
3078         struct intel_ppat *ppat = &i915->ppat;
3079         struct intel_ppat_entry *entry = NULL;
3080         unsigned int scanned, best_score;
3081         int i;
3082
3083         GEM_BUG_ON(!ppat->max_entries);
3084
3085         scanned = best_score = 0;
3086         for_each_set_bit(i, ppat->used, ppat->max_entries) {
3087                 unsigned int score;
3088
3089                 score = ppat->match(ppat->entries[i].value, value);
3090                 if (score > best_score) {
3091                         entry = &ppat->entries[i];
3092                         if (score == INTEL_PPAT_PERFECT_MATCH) {
3093                                 kref_get(&entry->ref);
3094                                 return entry;
3095                         }
3096                         best_score = score;
3097                 }
3098                 scanned++;
3099         }
3100
3101         if (scanned == ppat->max_entries) {
3102                 if (!entry)
3103                         return ERR_PTR(-ENOSPC);
3104
3105                 kref_get(&entry->ref);
3106                 return entry;
3107         }
3108
3109         i = find_first_zero_bit(ppat->used, ppat->max_entries);
3110         entry = __alloc_ppat_entry(ppat, i, value);
3111         ppat->update_hw(i915);
3112         return entry;
3113 }
3114
3115 static void release_ppat(struct kref *kref)
3116 {
3117         struct intel_ppat_entry *entry =
3118                 container_of(kref, struct intel_ppat_entry, ref);
3119         struct drm_i915_private *i915 = entry->ppat->i915;
3120
3121         __free_ppat_entry(entry);
3122         entry->ppat->update_hw(i915);
3123 }
3124
3125 /**
3126  * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
3127  * @entry: an intel PPAT entry
3128  *
3129  * Put back the PPAT entry obtained from intel_ppat_get(). If the PPAT index of
3130  * the entry was dynamically allocated, its reference count is decreased. Once
3131  * the reference count drops to zero, the PPAT index becomes free again.
3132  */
3133 void intel_ppat_put(const struct intel_ppat_entry *entry)
3134 {
3135         struct intel_ppat *ppat = entry->ppat;
3136         unsigned int index = entry - ppat->entries;
3137
3138         GEM_BUG_ON(!ppat->max_entries);
3139
3140         kref_put(&ppat->entries[index].ref, release_ppat);
3141 }
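
/*
 * Illustrative usage sketch (not taken from a real caller; the PPAT value here
 * is only an example): callers pair intel_ppat_get() with intel_ppat_put(), e.g.
 *
 *	const struct intel_ppat_entry *entry;
 *
 *	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	... encode entry->value into the relevant PTEs ...
 *	intel_ppat_put(entry);
 */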
3142
3143 static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3144 {
3145         struct intel_ppat *ppat = &dev_priv->ppat;
3146         int i;
3147
3148         for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3149                 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3150                 clear_bit(i, ppat->dirty);
3151         }
3152 }
3153
3154 static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3155 {
3156         struct intel_ppat *ppat = &dev_priv->ppat;
3157         u64 pat = 0;
3158         int i;
3159
3160         for (i = 0; i < ppat->max_entries; i++)
3161                 pat |= GEN8_PPAT(i, ppat->entries[i].value);
3162
3163         bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3164
3165         I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3166         I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3167 }
3168
3169 static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3170 {
3171         unsigned int score = 0;
3172         enum {
3173                 AGE_MATCH = BIT(0),
3174                 TC_MATCH = BIT(1),
3175                 CA_MATCH = BIT(2),
3176         };
3177
3178         /* Cache attribute has to be matched. */
3179         if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
3180                 return 0;
3181
3182         score |= CA_MATCH;
3183
3184         if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3185                 score |= TC_MATCH;
3186
3187         if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3188                 score |= AGE_MATCH;
3189
3190         if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3191                 return INTEL_PPAT_PERFECT_MATCH;
3192
3193         return score;
3194 }
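
/*
 * Worked example (illustrative values): matching GEN8_PPAT_WB |
 * GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3) against GEN8_PPAT_WB |
 * GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0) agrees on the cache attribute and the
 * target cache but not on the age, so the score is CA_MATCH | TC_MATCH rather
 * than INTEL_PPAT_PERFECT_MATCH.
 */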
3195
3196 static unsigned int chv_private_pat_match(u8 src, u8 dst)
3197 {
3198         return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3199                 INTEL_PPAT_PERFECT_MATCH : 0;
3200 }
3201
3202 static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3203 {
3204         ppat->max_entries = 8;
3205         ppat->update_hw = cnl_private_pat_update_hw;
3206         ppat->match = bdw_private_pat_match;
3207         ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3208
3209         __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3210         __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3211         __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3212         __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3213         __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3214         __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3215         __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3216         __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3217 }
3218
3219 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3220  * bits. When using advanced contexts each context stores its own PAT, but
3221  * writing this data shouldn't be harmful even in those cases. */
3222 static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3223 {
3224         ppat->max_entries = 8;
3225         ppat->update_hw = bdw_private_pat_update_hw;
3226         ppat->match = bdw_private_pat_match;
3227         ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3228
3229         if (!USES_PPGTT(ppat->i915)) {
3230                 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3231                  * so RTL will always use the value corresponding to
3232                  * pat_sel = 000".
3233                  * So let's disable cache for GGTT to avoid screen corruptions.
3234                  * MOCS still can be used though.
3235                  * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3236                  * before this patch, i.e. the same uncached + snooping access
3237                  * like on gen6/7 seems to be in effect.
3238                  * - So this just fixes blitter/render access. Again it looks
3239                  * like it's not just uncached access, but uncached + snooping.
3240                  * So we can still hold onto all our assumptions wrt cpu
3241                  * clflushing on LLC machines.
3242                  */
3243                 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3244                 return;
3245         }
3246
3247         __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);      /* for normal objects, no eLLC */
3248         __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);  /* for something pointing to ptes? */
3249         __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);  /* for scanout with eLLC */
3250         __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);                      /* Uncached objects, mostly for scanout */
3251         __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3252         __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3253         __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3254         __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3255 }
3256
3257 static void chv_setup_private_ppat(struct intel_ppat *ppat)
3258 {
3259         ppat->max_entries = 8;
3260         ppat->update_hw = bdw_private_pat_update_hw;
3261         ppat->match = chv_private_pat_match;
3262         ppat->clear_value = CHV_PPAT_SNOOP;
3263
3264         /*
3265          * Map WB on BDW to snooped on CHV.
3266          *
3267          * Only the snoop bit has meaning for CHV, the rest is
3268          * ignored.
3269          *
3270          * The hardware will never snoop for certain types of accesses:
3271          * - CPU GTT (GMADR->GGTT->no snoop->memory)
3272          * - PPGTT page tables
3273          * - some other special cycles
3274          *
3275          * As with BDW, we also need to consider the following for GT accesses:
3276          * "For GGTT, there is NO pat_sel[2:0] from the entry,
3277          * so RTL will always use the value corresponding to
3278          * pat_sel = 000".
3279          * Which means we must set the snoop bit in PAT entry 0
3280          * in order to keep the global status page working.
3281          */
3282
3283         __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3284         __alloc_ppat_entry(ppat, 1, 0);
3285         __alloc_ppat_entry(ppat, 2, 0);
3286         __alloc_ppat_entry(ppat, 3, 0);
3287         __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3288         __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3289         __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3290         __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
3291 }
3292
3293 static void gen6_gmch_remove(struct i915_address_space *vm)
3294 {
3295         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3296
3297         iounmap(ggtt->gsm);
3298         cleanup_scratch_page(vm);
3299 }
3300
3301 static void setup_private_pat(struct drm_i915_private *dev_priv)
3302 {
3303         struct intel_ppat *ppat = &dev_priv->ppat;
3304         int i;
3305
3306         ppat->i915 = dev_priv;
3307
3308         if (INTEL_GEN(dev_priv) >= 10)
3309                 cnl_setup_private_ppat(ppat);
3310         else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3311                 chv_setup_private_ppat(ppat);
3312         else
3313                 bdw_setup_private_ppat(ppat);
3314
3315         GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3316
3317         for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3318                 ppat->entries[i].value = ppat->clear_value;
3319                 ppat->entries[i].ppat = ppat;
3320                 set_bit(i, ppat->dirty);
3321         }
3322
3323         ppat->update_hw(dev_priv);
3324 }
3325
3326 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3327 {
3328         struct drm_i915_private *dev_priv = ggtt->base.i915;
3329         struct pci_dev *pdev = dev_priv->drm.pdev;
3330         unsigned int size;
3331         u16 snb_gmch_ctl;
3332         int err;
3333
3334         /* TODO: We're not aware of mappable constraints on gen8 yet */
3335         ggtt->mappable_base = pci_resource_start(pdev, 2);
3336         ggtt->mappable_end = pci_resource_len(pdev, 2);
3337
3338         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3339         if (!err)
3340                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3341         if (err)
3342                 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3343
3344         pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3345
3346         if (INTEL_GEN(dev_priv) >= 9) {
3347                 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
3348                 size = gen8_get_total_gtt_size(snb_gmch_ctl);
3349         } else if (IS_CHERRYVIEW(dev_priv)) {
3350                 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
3351                 size = chv_get_total_gtt_size(snb_gmch_ctl);
3352         } else {
3353                 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
3354                 size = gen8_get_total_gtt_size(snb_gmch_ctl);
3355         }
3356
3357         ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
3358         ggtt->base.cleanup = gen6_gmch_remove;
3359         ggtt->base.bind_vma = ggtt_bind_vma;
3360         ggtt->base.unbind_vma = ggtt_unbind_vma;
3361         ggtt->base.set_pages = ggtt_set_pages;
3362         ggtt->base.clear_pages = clear_pages;
3363         ggtt->base.insert_page = gen8_ggtt_insert_page;
3364         ggtt->base.clear_range = nop_clear_range;
3365         if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3366                 ggtt->base.clear_range = gen8_ggtt_clear_range;
3367
3368         ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3369
3370         /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3371         if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
3372                 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3373                 ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
3374                 if (ggtt->base.clear_range != nop_clear_range)
3375                         ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3376         }
3377
3378         ggtt->invalidate = gen6_ggtt_invalidate;
3379
3380         setup_private_pat(dev_priv);
3381
3382         return ggtt_probe_common(ggtt, size);
3383 }
3384
3385 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3386 {
3387         struct drm_i915_private *dev_priv = ggtt->base.i915;
3388         struct pci_dev *pdev = dev_priv->drm.pdev;
3389         unsigned int size;
3390         u16 snb_gmch_ctl;
3391         int err;
3392
3393         ggtt->mappable_base = pci_resource_start(pdev, 2);
3394         ggtt->mappable_end = pci_resource_len(pdev, 2);
3395
3396         /* 64/512MB is the current min/max we actually know of, but this is just
3397          * a coarse sanity check.
3398          */
3399         if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3400                 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
3401                 return -ENXIO;
3402         }
3403
3404         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3405         if (!err)
3406                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3407         if (err)
3408                 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3409         pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3410
3411         ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
3412
3413         size = gen6_get_total_gtt_size(snb_gmch_ctl);
3414         ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
3415
3416         ggtt->base.clear_range = gen6_ggtt_clear_range;
3417         ggtt->base.insert_page = gen6_ggtt_insert_page;
3418         ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3419         ggtt->base.bind_vma = ggtt_bind_vma;
3420         ggtt->base.unbind_vma = ggtt_unbind_vma;
3421         ggtt->base.set_pages = ggtt_set_pages;
3422         ggtt->base.clear_pages = clear_pages;
3423         ggtt->base.cleanup = gen6_gmch_remove;
3424
3425         ggtt->invalidate = gen6_ggtt_invalidate;
3426
3427         if (HAS_EDRAM(dev_priv))
3428                 ggtt->base.pte_encode = iris_pte_encode;
3429         else if (IS_HASWELL(dev_priv))
3430                 ggtt->base.pte_encode = hsw_pte_encode;
3431         else if (IS_VALLEYVIEW(dev_priv))
3432                 ggtt->base.pte_encode = byt_pte_encode;
3433         else if (INTEL_GEN(dev_priv) >= 7)
3434                 ggtt->base.pte_encode = ivb_pte_encode;
3435         else
3436                 ggtt->base.pte_encode = snb_pte_encode;
3437
3438         return ggtt_probe_common(ggtt, size);
3439 }
3440
3441 static void i915_gmch_remove(struct i915_address_space *vm)
3442 {
3443         intel_gmch_remove();
3444 }
3445
3446 static int i915_gmch_probe(struct i915_ggtt *ggtt)
3447 {
3448         struct drm_i915_private *dev_priv = ggtt->base.i915;
3449         int ret;
3450
3451         ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3452         if (!ret) {
3453                 DRM_ERROR("failed to set up gmch\n");
3454                 return -EIO;
3455         }
3456
3457         intel_gtt_get(&ggtt->base.total,
3458                       &ggtt->stolen_size,
3459                       &ggtt->mappable_base,
3460                       &ggtt->mappable_end);
3461
3462         ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3463         ggtt->base.insert_page = i915_ggtt_insert_page;
3464         ggtt->base.insert_entries = i915_ggtt_insert_entries;
3465         ggtt->base.clear_range = i915_ggtt_clear_range;
3466         ggtt->base.bind_vma = ggtt_bind_vma;
3467         ggtt->base.unbind_vma = ggtt_unbind_vma;
3468         ggtt->base.set_pages = ggtt_set_pages;
3469         ggtt->base.clear_pages = clear_pages;
3470         ggtt->base.cleanup = i915_gmch_remove;
3471
3472         ggtt->invalidate = gmch_ggtt_invalidate;
3473
3474         if (unlikely(ggtt->do_idle_maps))
3475                 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3476
3477         return 0;
3478 }
3479
3480 /**
3481  * i915_ggtt_probe_hw - Probe GGTT hardware location
3482  * @dev_priv: i915 device
3483  */
3484 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3485 {
3486         struct i915_ggtt *ggtt = &dev_priv->ggtt;
3487         int ret;
3488
3489         ggtt->base.i915 = dev_priv;
3490         ggtt->base.dma = &dev_priv->drm.pdev->dev;
3491
3492         if (INTEL_GEN(dev_priv) <= 5)
3493                 ret = i915_gmch_probe(ggtt);
3494         else if (INTEL_GEN(dev_priv) < 8)
3495                 ret = gen6_gmch_probe(ggtt);
3496         else
3497                 ret = gen8_gmch_probe(ggtt);
3498         if (ret)
3499                 return ret;
3500
3501         /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3502          * This is easier than doing range restriction on the fly, as we
3503          * currently don't have any bits spare to pass in this upper
3504          * restriction!
3505          */
3506         if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
3507                 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3508                 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3509         }
3510
3511         if ((ggtt->base.total - 1) >> 32) {
3512                 DRM_ERROR("We never expected a Global GTT with more than 32bits"
3513                           " of address space! Found %lldM!\n",
3514                           ggtt->base.total >> 20);
3515                 ggtt->base.total = 1ULL << 32;
3516                 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3517         }
3518
3519         if (ggtt->mappable_end > ggtt->base.total) {
3520                 DRM_ERROR("mappable aperture extends past end of GGTT,"
3521                           " aperture=%llx, total=%llx\n",
3522                           ggtt->mappable_end, ggtt->base.total);
3523                 ggtt->mappable_end = ggtt->base.total;
3524         }
3525
3526         /* GMADR is the PCI mmio aperture into the global GTT. */
3527         DRM_INFO("Memory usable by graphics device = %lluM\n",
3528                  ggtt->base.total >> 20);
3529         DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
3530         DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
3531         if (intel_vtd_active())
3532                 DRM_INFO("VT-d active for gfx access\n");
3533
3534         return 0;
3535 }
3536
3537 /**
3538  * i915_ggtt_init_hw - Initialize GGTT hardware
3539  * @dev_priv: i915 device
3540  */
3541 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3542 {
3543         struct i915_ggtt *ggtt = &dev_priv->ggtt;
3544         int ret;
3545
3546         INIT_LIST_HEAD(&dev_priv->vm_list);
3547
3548         /* Note that we use page colouring to enforce a guard page at the
3549          * end of the address space. This is required as the CS may prefetch
3550          * beyond the end of the batch buffer, across the page boundary,
3551          * and beyond the end of the GTT if we do not provide a guard.
3552          */
3553         mutex_lock(&dev_priv->drm.struct_mutex);
3554         i915_address_space_init(&ggtt->base, dev_priv, "[global]");
3555         if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
3556                 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
3557         mutex_unlock(&dev_priv->drm.struct_mutex);
3558
3559         if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3560                                 dev_priv->ggtt.mappable_base,
3561                                 dev_priv->ggtt.mappable_end)) {
3562                 ret = -EIO;
3563                 goto out_gtt_cleanup;
3564         }
3565
3566         ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3567
3568         /*
3569          * Initialise stolen early so that we may reserve preallocated
3570          * objects for the BIOS to KMS transition.
3571          */
3572         ret = i915_gem_init_stolen(dev_priv);
3573         if (ret)
3574                 goto out_gtt_cleanup;
3575
3576         return 0;
3577
3578 out_gtt_cleanup:
3579         ggtt->base.cleanup(&ggtt->base);
3580         return ret;
3581 }
3582
3583 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3584 {
3585         if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3586                 return -EIO;
3587
3588         return 0;
3589 }
3590
3591 void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3592 {
3593         GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3594
3595         i915->ggtt.invalidate = guc_ggtt_invalidate;
3596 }
3597
3598 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3599 {
3600         /* We should only be called after i915_ggtt_enable_guc() */
3601         GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3602
3603         i915->ggtt.invalidate = gen6_ggtt_invalidate;
3604 }
3605
3606 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3607 {
3608         struct i915_ggtt *ggtt = &dev_priv->ggtt;
3609         struct drm_i915_gem_object *obj, *on;
3610
3611         i915_check_and_clear_faults(dev_priv);
3612
3613         /* First fill our portion of the GTT with scratch pages */
3614         ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
3615
3616         ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3617
3618         /* clflush objects bound into the GGTT and rebind them. */
3619         list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
3620                 bool ggtt_bound = false;
3621                 struct i915_vma *vma;
3622
3623                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3624                         if (vma->vm != &ggtt->base)
3625                                 continue;
3626
3627                         if (!i915_vma_unbind(vma))
3628                                 continue;
3629
3630                         WARN_ON(i915_vma_bind(vma, obj->cache_level,
3631                                               PIN_UPDATE));
3632                         ggtt_bound = true;
3633                 }
3634
3635                 if (ggtt_bound)
3636                         WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3637         }
3638
3639         ggtt->base.closed = false;
3640
3641         if (INTEL_GEN(dev_priv) >= 8) {
3642                 struct intel_ppat *ppat = &dev_priv->ppat;
3643
3644                 bitmap_set(ppat->dirty, 0, ppat->max_entries);
3645                 dev_priv->ppat.update_hw(dev_priv);
3646                 return;
3647         }
3648
3649         if (USES_PPGTT(dev_priv)) {
3650                 struct i915_address_space *vm;
3651
3652                 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3653                         struct i915_hw_ppgtt *ppgtt;
3654
3655                         if (i915_is_ggtt(vm))
3656                                 ppgtt = dev_priv->mm.aliasing_ppgtt;
3657                         else
3658                                 ppgtt = i915_vm_to_ppgtt(vm);
3659
3660                         gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
3661                 }
3662         }
3663
3664         i915_ggtt_invalidate(dev_priv);
3665 }
3666
3667 static struct scatterlist *
3668 rotate_pages(const dma_addr_t *in, unsigned int offset,
3669              unsigned int width, unsigned int height,
3670              unsigned int stride,
3671              struct sg_table *st, struct scatterlist *sg)
3672 {
3673         unsigned int column, row;
3674         unsigned int src_idx;
3675
3676         for (column = 0; column < width; column++) {
3677                 src_idx = stride * (height - 1) + column;
3678                 for (row = 0; row < height; row++) {
3679                         st->nents++;
3680                         /* We don't need the pages, but need to initialize
3681                          * the entries so the sg list can be happily traversed.
3682                          * All we need are the DMA addresses.
3683                          */
3684                         sg_set_page(sg, NULL, PAGE_SIZE, 0);
3685                         sg_dma_address(sg) = in[offset + src_idx];
3686                         sg_dma_len(sg) = PAGE_SIZE;
3687                         sg = sg_next(sg);
3688                         src_idx -= stride;
3689                 }
3690         }
3691
3692         return sg;
3693 }
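
/*
 * Illustrative walk of rotate_pages() for a hypothetical 2x2 tile plane with
 * stride 2 and offset 0: column 0 starts at src_idx 2 and steps up to 0,
 * column 1 starts at 3 and steps up to 1, so the rotated mapping references
 * the object's pages in the order 2, 0, 3, 1.
 */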
3694
3695 static noinline struct sg_table *
3696 intel_rotate_pages(struct intel_rotation_info *rot_info,
3697                    struct drm_i915_gem_object *obj)
3698 {
3699         const unsigned long n_pages = obj->base.size / PAGE_SIZE;
3700         unsigned int size = intel_rotation_info_size(rot_info);
3701         struct sgt_iter sgt_iter;
3702         dma_addr_t dma_addr;
3703         unsigned long i;
3704         dma_addr_t *page_addr_list;
3705         struct sg_table *st;
3706         struct scatterlist *sg;
3707         int ret = -ENOMEM;
3708
3709         /* Allocate a temporary list of source pages for random access. */
3710         page_addr_list = kvmalloc_array(n_pages,
3711                                         sizeof(dma_addr_t),
3712                                         GFP_KERNEL);
3713         if (!page_addr_list)
3714                 return ERR_PTR(ret);
3715
3716         /* Allocate target SG list. */
3717         st = kmalloc(sizeof(*st), GFP_KERNEL);
3718         if (!st)
3719                 goto err_st_alloc;
3720
3721         ret = sg_alloc_table(st, size, GFP_KERNEL);
3722         if (ret)
3723                 goto err_sg_alloc;
3724
3725         /* Populate source page list from the object. */
3726         i = 0;
3727         for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
3728                 page_addr_list[i++] = dma_addr;
3729
3730         GEM_BUG_ON(i != n_pages);
3731         st->nents = 0;
3732         sg = st->sgl;
3733
3734         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3735                 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3736                                   rot_info->plane[i].width, rot_info->plane[i].height,
3737                                   rot_info->plane[i].stride, st, sg);
3738         }
3739
3740         DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3741                       obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3742
3743         kvfree(page_addr_list);
3744
3745         return st;
3746
3747 err_sg_alloc:
3748         kfree(st);
3749 err_st_alloc:
3750         kvfree(page_addr_list);
3751
3752         DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3753                       obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3754
3755         return ERR_PTR(ret);
3756 }
3757
3758 static noinline struct sg_table *
3759 intel_partial_pages(const struct i915_ggtt_view *view,
3760                     struct drm_i915_gem_object *obj)
3761 {
3762         struct sg_table *st;
3763         struct scatterlist *sg, *iter;
3764         unsigned int count = view->partial.size;
3765         unsigned int offset;
3766         int ret = -ENOMEM;
3767
3768         st = kmalloc(sizeof(*st), GFP_KERNEL);
3769         if (!st)
3770                 goto err_st_alloc;
3771
3772         ret = sg_alloc_table(st, count, GFP_KERNEL);
3773         if (ret)
3774                 goto err_sg_alloc;
3775
3776         iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3777         GEM_BUG_ON(!iter);
3778
3779         sg = st->sgl;
3780         st->nents = 0;
3781         do {
3782                 unsigned int len;
3783
3784                 len = min(iter->length - (offset << PAGE_SHIFT),
3785                           count << PAGE_SHIFT);
3786                 sg_set_page(sg, NULL, len, 0);
3787                 sg_dma_address(sg) =
3788                         sg_dma_address(iter) + (offset << PAGE_SHIFT);
3789                 sg_dma_len(sg) = len;
3790
3791                 st->nents++;
3792                 count -= len >> PAGE_SHIFT;
3793                 if (count == 0) {
3794                         sg_mark_end(sg);
3795                         return st;
3796                 }
3797
3798                 sg = __sg_next(sg);
3799                 iter = __sg_next(iter);
3800                 offset = 0;
3801         } while (1);
3802
3803 err_sg_alloc:
3804         kfree(st);
3805 err_st_alloc:
3806         return ERR_PTR(ret);
3807 }
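
/*
 * Illustrative example (hypothetical view parameters): a partial view with
 * view->partial.offset == 3 and view->partial.size == 2 yields an sg_table
 * whose DMA addresses cover exactly pages 3 and 4 of the object.
 */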
3808
3809 static int
3810 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3811 {
3812         int ret;
3813
3814         /* The vma->pages are only valid within the lifespan of the borrowed
3815          * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3816          * must be the vma->pages. A simple rule is that vma->pages must only
3817          * be accessed when the obj->mm.pages are pinned.
3818          */
3819         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3820
3821         switch (vma->ggtt_view.type) {
3822         case I915_GGTT_VIEW_NORMAL:
3823                 vma->pages = vma->obj->mm.pages;
3824                 return 0;
3825
3826         case I915_GGTT_VIEW_ROTATED:
3827                 vma->pages =
3828                         intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3829                 break;
3830
3831         case I915_GGTT_VIEW_PARTIAL:
3832                 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3833                 break;
3834
3835         default:
3836                 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3837                           vma->ggtt_view.type);
3838                 return -EINVAL;
3839         }
3840
3841         ret = 0;
3842         if (unlikely(IS_ERR(vma->pages))) {
3843                 ret = PTR_ERR(vma->pages);
3844                 vma->pages = NULL;
3845                 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3846                           vma->ggtt_view.type, ret);
3847         }
3848         return ret;
3849 }
3850
3851 /**
3852  * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3853  * @vm: the &struct i915_address_space
3854  * @node: the &struct drm_mm_node (typically i915_vma.node)
3855  * @size: how much space to allocate inside the GTT,
3856  *        must be #I915_GTT_PAGE_SIZE aligned
3857  * @offset: where to insert inside the GTT,
3858  *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3859  *          (@offset + @size) must fit within the address space
3860  * @color: color to apply to node, if this node is not from a VMA,
3861  *         color must be #I915_COLOR_UNEVICTABLE
3862  * @flags: control search and eviction behaviour
3863  *
3864  * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3865  * the address space (using @size and @color). If the @node does not fit, it
3866  * tries to evict any overlapping nodes from the GTT, including any
3867  * neighbouring nodes if the colors do not match (to ensure guard pages between
3868  * differing domains). See i915_gem_evict_for_node() for the gory details
3869  * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3870  * evicting active overlapping objects, and any overlapping node that is pinned
3871  * or marked as unevictable will also result in failure.
3872  *
3873  * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3874  * asked to wait for eviction and interrupted.
3875  */
3876 int i915_gem_gtt_reserve(struct i915_address_space *vm,
3877                          struct drm_mm_node *node,
3878                          u64 size, u64 offset, unsigned long color,
3879                          unsigned int flags)
3880 {
3881         int err;
3882
3883         GEM_BUG_ON(!size);
3884         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3885         GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3886         GEM_BUG_ON(range_overflows(offset, size, vm->total));
3887         GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
3888         GEM_BUG_ON(drm_mm_node_allocated(node));
3889
3890         node->size = size;
3891         node->start = offset;
3892         node->color = color;
3893
3894         err = drm_mm_reserve_node(&vm->mm, node);
3895         if (err != -ENOSPC)
3896                 return err;
3897
3898         if (flags & PIN_NOEVICT)
3899                 return -ENOSPC;
3900
3901         err = i915_gem_evict_for_node(vm, node, flags);
3902         if (err == 0)
3903                 err = drm_mm_reserve_node(&vm->mm, node);
3904
3905         return err;
3906 }
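
/*
 * Illustrative usage sketch (hypothetical caller; the offset and size are made
 * up): reserving 2MiB at a fixed 16MiB offset in the GGTT.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&ggtt->base, &node, SZ_2M, SZ_16M,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *
 * With PIN_NOEVICT the call returns -ENOSPC instead of evicting whatever
 * already overlaps that range.
 */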
3907
3908 static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3909 {
3910         u64 range, addr;
3911
3912         GEM_BUG_ON(range_overflows(start, len, end));
3913         GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3914
3915         range = round_down(end - len, align) - round_up(start, align);
3916         if (range) {
3917                 if (sizeof(unsigned long) == sizeof(u64)) {
3918                         addr = get_random_long();
3919                 } else {
3920                         addr = get_random_int();
3921                         if (range > U32_MAX) {
3922                                 addr <<= 32;
3923                                 addr |= get_random_int();
3924                         }
3925                 }
3926                 div64_u64_rem(addr, range, &addr);
3927                 start += addr;
3928         }
3929
3930         return round_up(start, align);
3931 }
3932
3933 /**
3934  * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3935  * @vm: the &struct i915_address_space
3936  * @node: the &struct drm_mm_node (typically i915_vma.node)
3937  * @size: how much space to allocate inside the GTT,
3938  *        must be #I915_GTT_PAGE_SIZE aligned
3939  * @alignment: required alignment of starting offset, may be 0 but
3940  *             if specified, this must be a power-of-two and at least
3941  *             #I915_GTT_MIN_ALIGNMENT
3942  * @color: color to apply to node
3943  * @start: start of any range restriction inside GTT (0 for all),
3944  *         must be #I915_GTT_PAGE_SIZE aligned
3945  * @end: end of any range restriction inside GTT (U64_MAX for all),
3946  *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3947  * @flags: control search and eviction behaviour
3948  *
3949  * i915_gem_gtt_insert() first searches for an available hole into which
3950  * it can insert the node. The hole address is aligned to @alignment and
3951  * its @size must then fit entirely within the [@start, @end] bounds. The
3952  * nodes on either side of the hole must match @color, or else a guard page
3953  * will be inserted between the two nodes (or the node evicted). If no
3954  * suitable hole is found, a victim is first selected at random and tested
3955  * for eviction; if that fails, the LRU list of objects within the GTT
3956  * is scanned to find the first set of replacement nodes to create the hole.
3957  * Those old overlapping nodes are evicted from the GTT (and so must be
3958  * rebound before any future use). Any node that is currently pinned cannot
3959  * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3960  * active and #PIN_NONBLOCK is specified, that node is also skipped when
3961  * searching for an eviction candidate. See i915_gem_evict_something() for
3962  * the gory details on the eviction algorithm.
3963  *
3964  * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3965  * asked to wait for eviction and interrupted.
3966  */
3967 int i915_gem_gtt_insert(struct i915_address_space *vm,
3968                         struct drm_mm_node *node,
3969                         u64 size, u64 alignment, unsigned long color,
3970                         u64 start, u64 end, unsigned int flags)
3971 {
3972         enum drm_mm_insert_mode mode;
3973         u64 offset;
3974         int err;
3975
3976         lockdep_assert_held(&vm->i915->drm.struct_mutex);
3977         GEM_BUG_ON(!size);
3978         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3979         GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3980         GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3981         GEM_BUG_ON(start >= end);
3982         GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3983         GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3984         GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
3985         GEM_BUG_ON(drm_mm_node_allocated(node));
3986
3987         if (unlikely(range_overflows(start, size, end)))
3988                 return -ENOSPC;
3989
3990         if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3991                 return -ENOSPC;
3992
3993         mode = DRM_MM_INSERT_BEST;
3994         if (flags & PIN_HIGH)
3995                 mode = DRM_MM_INSERT_HIGH;
3996         if (flags & PIN_MAPPABLE)
3997                 mode = DRM_MM_INSERT_LOW;
3998
3999         /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
4000          * so we know that we always have a minimum alignment of 4096.
4001          * The drm_mm range manager is optimised to return results
4002          * with zero alignment, so where possible use the optimal
4003          * path.
4004          */
4005         BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
4006         if (alignment <= I915_GTT_MIN_ALIGNMENT)
4007                 alignment = 0;
4008
4009         err = drm_mm_insert_node_in_range(&vm->mm, node,
4010                                           size, alignment, color,
4011                                           start, end, mode);
4012         if (err != -ENOSPC)
4013                 return err;
4014
4015         if (flags & PIN_NOEVICT)
4016                 return -ENOSPC;
4017
4018         /* No free space, pick a slot at random.
4019          *
4020          * There is a pathological case here using a GTT shared between
4021          * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
4022          *
4023          *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
4024          *         (64k objects)             (448k objects)
4025          *
4026          * Now imagine that the eviction LRU is ordered top-down (just because
4027          * pathology meets real life), and that we need to evict an object to
4028          * make room inside the aperture. The eviction scan then has to walk
4029          * the 448k list before it finds one within range. And now imagine that
4030          * it has to search for a new hole between every byte inside the memcpy,
4031          * for several simultaneous clients.
4032          *
4033          * On a full-ppgtt system, if we have run out of available space, there
4034          * will be lots and lots of objects in the eviction list! Again,
4035          * searching that LRU list may be slow if we are also applying any
4036          * range restrictions (e.g. restriction to low 4GiB) and so, for
4037          * simplicity and similarilty between different GTT, try the single
4038  * simplicity and similarity between different GTTs, try the single
4039          */
4040         offset = random_offset(start, end,
4041                                size, alignment ?: I915_GTT_MIN_ALIGNMENT);
4042         err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
4043         if (err != -ENOSPC)
4044                 return err;
4045
4046         /* Randomly selected placement is pinned, do a search */
4047         err = i915_gem_evict_something(vm, size, alignment, color,
4048                                        start, end, flags);
4049         if (err)
4050                 return err;
4051
4052         return drm_mm_insert_node_in_range(&vm->mm, node,
4053                                            size, alignment, color,
4054                                            start, end, DRM_MM_INSERT_EVICT);
4055 }
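
/*
 * Illustrative usage sketch (hypothetical caller, with struct_mutex held):
 * letting the allocator pick any suitably sized hole in the upper part of the
 * GGTT, evicting if needed.
 *
 *	err = i915_gem_gtt_insert(&ggtt->base, &node, size, 0,
 *				  I915_COLOR_UNEVICTABLE,
 *				  0, ggtt->base.total, PIN_HIGH);
 *
 * On success @node describes the chosen range; -ENOSPC means nothing suitable
 * could be found or evicted.
 */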
4056
4057 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4058 #include "selftests/mock_gtt.c"
4059 #include "selftests/i915_gem_gtt.c"
4060 #endif