drm/i915: Invalidate the guc ggtt TLB upon insertion
[linux-2.6-block.git] drivers/gpu/drm/i915/i915_gem_internal.c
/*
 * Copyright © 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define QUIET (__GFP_NORETRY | __GFP_NOWARN)

/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
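/*
 * Worked example, assuming the generic swiotlb defaults of this era
 * (IO_TLB_SEGSIZE = 128 slabs, IO_TLB_SHIFT = 11): a segment is
 * 128 << 11 = 256KiB, i.e. 64 pages with a 4KiB PAGE_SHIFT.
 */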

static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg))
		__free_pages(sg_page(sg), get_order(sg->length));

	sg_free_table(st);
	kfree(st);
}

static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int npages = obj->base.size / PAGE_SIZE;
	struct sg_table *st;
	struct scatterlist *sg;
	int max_order;
	gfp_t gfp;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
		max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

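	/*
	 * Build the sg_table from the largest contiguous chunks available:
	 * start at the highest order that still fits the remaining pages,
	 * fall back to smaller orders on allocation failure, and never retry
	 * an order that has already failed for subsequent chunks.
	 */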
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : 0), order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;
	return st;

err:
	sg_mark_end(sg);
	internal_free_pages(st);
	return ERR_PTR(-ENOMEM);
}

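/*
 * Release path: undo the GTT preparation, free the pages and restore the
 * default madvise state (WILLNEED) ready for the next repopulation.
 */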
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

/**
 * i915_gem_object_create_internal - create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation. See the usage sketch
 * after the function body below.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				unsigned int size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_object_internal_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;
}
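
/*
 * Usage sketch (illustrative only, not part of the driver): a caller would
 * typically create an internal object, pin its pages to materialise the
 * backing storage via i915_gem_object_get_pages_internal() above, use the
 * object while pinned, and unpin when done.  The helper name and the 4096
 * byte size below are made up for the example; i915_gem_object_pin_pages(),
 * i915_gem_object_unpin_pages() and i915_gem_object_put() are the driver's
 * standard object interfaces of this era.
 */
static inline int example_use_internal_object(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Pinning allocates and maps the volatile backing pages. */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	/* The contents are only valid while the pages remain pinned; once
	 * unpinned the shrinker may discard them at any time and the caller
	 * must repopulate on the next pin.
	 */

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}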