mm, treewide: redefine MAX_ORDER sanely
[linux-2.6-block.git] / drivers/gpu/drm/i915/gem/i915_gem_internal.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_internal.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

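/*
 * gfp decorations for the backing-store allocation loop below: the
 * opportunistic high-order attempts use QUIET so they fail fast and
 * silently, while the final order-0 attempt uses MAYFAIL so it may
 * still fail gracefully instead of invoking the OOM killer.
 */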
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

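/* Free each (potentially high-order) segment, then release the table. */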
static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int npages; /* restricted by sg_alloc_table */
	int max_order = MAX_ORDER;
	unsigned int max_segment;
	gfp_t gfp;

	if (overflows_type(obj->base.size >> PAGE_SHIFT, npages))
		return -E2BIG;

	npages = obj->base.size >> PAGE_SHIFT;
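	/*
	 * Each sg segment is backed by a single compound allocation, so
	 * cap the allocation order at the largest dma segment the device
	 * can accept.
	 */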
	max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT;
	max_order = min(max_order, get_order(max_segment));

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;

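	/*
	 * Fill the object greedily: try the largest order that still fits
	 * the remaining page count, fall back to smaller orders on failure,
	 * and clamp max_order so later segments skip sizes that have
	 * already failed.
	 */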
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; retry with single-page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}

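/* Unmap from the GTT and hand the pages back; contents are discarded. */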
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}

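/*
 * Internal objects are shrinkable: once their pages are unpinned they may
 * be reclaimed at any time, and the owner must repopulate them on reuse.
 */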
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.name = "i915_gem_object_internal",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

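/*
 * Common constructor: callers may supply their own @ops vtable while
 * reusing the volatile-object setup below.
 */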
struct drm_i915_gem_object *
__i915_gem_object_create_internal(struct drm_i915_private *i915,
				  const struct drm_i915_gem_object_ops *ops,
				  phys_addr_t size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

	/*
	 * Mark the object as volatile, such that the pages are marked as
	 * dontneed whilst they are still pinned. As soon as they are unpinned
	 * they are allowed to be reaped by the shrinker, and the caller is
	 * expected to repopulate - the contents of this object are only valid
	 * whilst active and pinned.
	 */
	i915_gem_object_set_volatile(obj);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size);
}
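
/*
 * Example usage (illustrative sketch, not part of this file): callers
 * allocate a page-aligned object and must check for an ERR_PTR, e.g.
 *
 *	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * The contents are valid only while the object remains pinned and are
 * not zeroed on allocation.
 */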