drm/i915: Keep a small stash of preallocated WC pages
Author:     Chris Wilson <chris@chris-wilson.co.uk>
AuthorDate: Tue, 22 Aug 2017 17:38:28 +0000 (18:38 +0100)
Commit:     Chris Wilson <chris@chris-wilson.co.uk>
CommitDate: Tue, 22 Aug 2017 18:43:00 +0000 (19:43 +0100)
We use WC pages for coherent writes into the ppGTT on !llc
architectures. However, creating a WC page requires a stop_machine(),
i.e. it is very slow. To compensate we currently keep a per-vm cache of
recently freed pages, but we still see slow startup of new contexts.
We can amortize that cost slightly by allocating WC pages in small
batches (PAGEVEC_SIZE == 14), and since creating a WC page implies a
stop_machine() there is no penalty for keeping that stash global.
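
The idea in a minimal user-space sketch (STASH_SIZE, alloc_wc_page()
and make_pages_wc() are illustrative stand-ins for PAGEVEC_SIZE,
vm_alloc_page() and the set_pages_array_wc() call in the patch below,
not kernel APIs):

#define STASH_SIZE 14   /* mirrors PAGEVEC_SIZE at the time of writing */

#include <stdlib.h>

static struct {
        unsigned int nr;
        void *pages[STASH_SIZE];
} wc_stash;             /* one global stash, like i915->mm.wc_stash below */

/* Stand-in for the expensive set_pages_array_wc()/stop_machine() step. */
static void make_pages_wc(void **pages, unsigned int nr)
{
        (void)pages;
        (void)nr;
}

static void *alloc_wc_page(void)
{
        /* Fast path: reuse a page that is already write-combining. */
        if (wc_stash.nr)
                return wc_stash.pages[--wc_stash.nr];

        /* Slow path: fill the stash, then convert the whole batch in
         * one call, paying for one stop_machine() instead of up to
         * STASH_SIZE of them.
         */
        while (wc_stash.nr < STASH_SIZE) {
                void *page = malloc(4096);
                if (!page)
                        break;
                wc_stash.pages[wc_stash.nr++] = page;
        }
        if (!wc_stash.nr)
                return NULL;

        make_pages_wc(wc_stash.pages, wc_stash.nr);
        return wc_stash.pages[--wc_stash.nr];
}

Since every conversion serialises the whole machine anyway, sharing one
stash between all VMs (guarded in the patch by struct_mutex) costs
nothing over a per-vm stash.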

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170822173828.5932-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 60267e375e8812374fca2f5113401b85df23eebf..7587ef53026b7c5370c9219c9447e0c061fbae7b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1465,6 +1465,11 @@ struct i915_gem_mm {
        struct llist_head free_list;
        struct work_struct free_work;
 
+       /**
+        * Small stash of WC pages
+        */
+       struct pagevec wc_stash;
+
        /** Usable portion of the GTT for GEM */
        dma_addr_t stolen_base; /* limited to low memory (32-bit) */
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0f7399801398d0e701a064d6d4c34ba175b72700..708b95cd8c30f6542a495d479923d3af91027d66 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -356,39 +356,86 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
-       struct page *page;
+       struct pagevec *pvec = &vm->free_pages;
 
        if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
                i915_gem_shrink_all(vm->i915);
 
-       if (vm->free_pages.nr)
-               return vm->free_pages.pages[--vm->free_pages.nr];
+       if (likely(pvec->nr))
+               return pvec->pages[--pvec->nr];
+
+       if (!vm->pt_kmap_wc)
+               return alloc_page(gfp);
+
+       /* A placeholder for a specific mutex to guard the WC stash */
+       lockdep_assert_held(&vm->i915->drm.struct_mutex);
+
+       /* Look in our global stash of WC pages... */
+       pvec = &vm->i915->mm.wc_stash;
+       if (likely(pvec->nr))
+               return pvec->pages[--pvec->nr];
 
-       page = alloc_page(gfp);
-       if (!page)
+       /* Otherwise batch allocate pages to amortize cost of set_pages_array_wc. */
+       do {
+               struct page *page;
+
+               page = alloc_page(gfp);
+               if (unlikely(!page))
+                       break;
+
+               pvec->pages[pvec->nr++] = page;
+       } while (pagevec_space(pvec));
+
+       if (unlikely(!pvec->nr))
                return NULL;
 
-       if (vm->pt_kmap_wc)
-               set_pages_array_wc(&page, 1);
+       set_pages_array_wc(pvec->pages, pvec->nr);
 
-       return page;
+       return pvec->pages[--pvec->nr];
 }
 
-static void vm_free_pages_release(struct i915_address_space *vm)
+static void vm_free_pages_release(struct i915_address_space *vm,
+                                 bool immediate)
 {
-       GEM_BUG_ON(!pagevec_count(&vm->free_pages));
+       struct pagevec *pvec = &vm->free_pages;
+
+       GEM_BUG_ON(!pagevec_count(pvec));
+
+       if (vm->pt_kmap_wc) {
+               struct pagevec *stash = &vm->i915->mm.wc_stash;
 
-       if (vm->pt_kmap_wc)
-               set_pages_array_wb(vm->free_pages.pages,
-                                  pagevec_count(&vm->free_pages));
+               /* When we use WC, first fill up the global stash and then,
+                * only if the stash is full, immediately free the overflow.
+                */
+
+               lockdep_assert_held(&vm->i915->drm.struct_mutex);
+               if (pagevec_space(stash)) {
+                       do {
+                               stash->pages[stash->nr++] =
+                                       pvec->pages[--pvec->nr];
+                               if (!pvec->nr)
+                                       return;
+                       } while (pagevec_space(stash));
+
+                       /* As we have made some room in the VM's free_pages,
+                        * we can wait for it to fill again. Unless we are
+                        * inside i915_address_space_fini() and must
+                        * immediately release the pages!
+                        */
+                       if (!immediate)
+                               return;
+               }
+
+               set_pages_array_wb(pvec->pages, pvec->nr);
+       }
 
-       __pagevec_release(&vm->free_pages);
+       __pagevec_release(pvec);
 }
 
 static void vm_free_page(struct i915_address_space *vm, struct page *page)
 {
        if (!pagevec_add(&vm->free_pages, page))
-               vm_free_pages_release(vm);
+               vm_free_pages_release(vm, false);
 }
 
 static int __setup_page_dma(struct i915_address_space *vm,
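
The release policy above, continued in the same user-space sketch
(make_pages_wb() standing in for set_pages_array_wb(); the real code
additionally batches frees through the per-vm free_pages pagevec
first, and releases everything at once from i915_address_space_fini()):

/* Stand-in for set_pages_array_wb(), restoring the write-back state. */
static void make_pages_wb(void **pages, unsigned int nr)
{
        (void)pages;
        (void)nr;
}

/* Companion to alloc_wc_page(): keep freed pages WC for quick reuse. */
static void free_wc_page(void *page)
{
        if (wc_stash.nr < STASH_SIZE) {
                wc_stash.pages[wc_stash.nr++] = page;
                return;
        }

        /* Stash full: only the overflow pays to go back to WB. */
        make_pages_wb(&page, 1);
        free(page);
}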
@@ -452,12 +499,31 @@ static void fill_page_dma_32(struct i915_address_space *vm,
 static int
 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-       return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
+       struct page *page;
+       dma_addr_t addr;
+
+       page = alloc_page(gfp | __GFP_ZERO);
+       if (unlikely(!page))
+               return -ENOMEM;
+
+       addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+                           PCI_DMA_BIDIRECTIONAL);
+       if (unlikely(dma_mapping_error(vm->dma, addr))) {
+               __free_page(page);
+               return -ENOMEM;
+       }
+
+       vm->scratch_page.page = page;
+       vm->scratch_page.daddr = addr;
+       return 0;
 }
 
 static void cleanup_scratch_page(struct i915_address_space *vm)
 {
-       cleanup_page_dma(vm, &vm->scratch_page);
+       struct i915_page_dma *p = &vm->scratch_page;
+
+       dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       __free_page(p->page);
 }
 
 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -1337,18 +1403,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
                1ULL << 48 :
                1ULL << 32;
 
-       ret = gen8_init_scratch(&ppgtt->base);
-       if (ret) {
-               ppgtt->base.total = 0;
-               return ret;
-       }
-
        /* There are only a few exceptions for gen >= 6: chv and bxt.
         * And we are not sure about the latter, so play safe for now.
         */
        if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
                ppgtt->base.pt_kmap_wc = true;
 
+       ret = gen8_init_scratch(&ppgtt->base);
+       if (ret) {
+               ppgtt->base.total = 0;
+               return ret;
+       }
+
        if (use_4lvl(vm)) {
                ret = setup_px(&ppgtt->base, &ppgtt->pml4);
                if (ret)
@@ -1872,7 +1938,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 static void i915_address_space_fini(struct i915_address_space *vm)
 {
        if (pagevec_count(&vm->free_pages))
-               vm_free_pages_release(vm);
+               vm_free_pages_release(vm, true);
 
        i915_gem_timeline_fini(&vm->timeline);
        drm_mm_takedown(&vm->mm);
@@ -2598,6 +2664,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma, *vn;
+       struct pagevec *pvec;
 
        ggtt->base.closed = true;
 
@@ -2621,6 +2688,13 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
        }
 
        ggtt->base.cleanup(&ggtt->base);
+
+       pvec = &dev_priv->mm.wc_stash;
+       if (pvec->nr) {
+               set_pages_array_wb(pvec->pages, pvec->nr);
+               __pagevec_release(pvec);
+       }
+
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        arch_phys_wc_del(ggtt->mtrr);