drm/i915: Move more GEM objects under gem/
[linux-2.6-block.git] / drivers / gpu / drm / i915 / gem / selftests / huge_gem_object.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6
7 #include "huge_gem_object.h"
8
9 static void huge_free_pages(struct drm_i915_gem_object *obj,
10                             struct sg_table *pages)
11 {
12         unsigned long nreal = obj->scratch / PAGE_SIZE;
13         struct scatterlist *sg;
14
15         for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
16                 __free_page(sg_page(sg));
17
18         sg_free_table(pages);
19         kfree(pages);
20 }
21
22 static int huge_get_pages(struct drm_i915_gem_object *obj)
23 {
24 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
25         const unsigned long nreal = obj->scratch / PAGE_SIZE;
26         const unsigned long npages = obj->base.size / PAGE_SIZE;
27         struct scatterlist *sg, *src, *end;
28         struct sg_table *pages;
29         unsigned long n;
30
31         pages = kmalloc(sizeof(*pages), GFP);
32         if (!pages)
33                 return -ENOMEM;
34
35         if (sg_alloc_table(pages, npages, GFP)) {
36                 kfree(pages);
37                 return -ENOMEM;
38         }
39
40         sg = pages->sgl;
41         for (n = 0; n < nreal; n++) {
42                 struct page *page;
43
44                 page = alloc_page(GFP | __GFP_HIGHMEM);
45                 if (!page) {
46                         sg_mark_end(sg);
47                         goto err;
48                 }
49
50                 sg_set_page(sg, page, PAGE_SIZE, 0);
51                 sg = __sg_next(sg);
52         }
53         if (nreal < npages) {
54                 for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
55                         sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
56                         src = __sg_next(src);
57                         if (src == end)
58                                 src = pages->sgl;
59                 }
60         }
61
62         if (i915_gem_gtt_prepare_pages(obj, pages))
63                 goto err;
64
65         __i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
66
67         return 0;
68
69 err:
70         huge_free_pages(obj, pages);
71
72         return -ENOMEM;
73 #undef GFP
74 }
75
/*
 * Release the backing store built by huge_get_pages(): undo the
 * i915_gem_gtt_prepare_pages() step, then free the real pages and table.
 */
static void huge_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_free_pages(obj, pages);

	/* The pages are gone, so there is nothing left to flush back. */
	obj->mm.dirty = false;
}
84
/*
 * Object vfuncs for the fake "huge" object: it is backed by real struct
 * pages and is eligible for shrinking like an ordinary object.
 */
static const struct drm_i915_gem_object_ops huge_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = huge_get_pages,
	.put_pages = huge_put_pages,
};
91
/**
 * huge_gem_object - create a test object whose virtual size exceeds its
 * physical backing store
 * @i915: i915 device
 * @phys_size: bytes of real memory to allocate (must be page aligned,
 *             non-zero and <= @dma_size)
 * @dma_size: virtual size of the object (must be I915_GTT_PAGE_SIZE
 *            aligned)
 *
 * The physical size is stashed in obj->scratch for later use by the
 * get_pages/put_pages callbacks.
 *
 * Returns the new object, or an ERR_PTR() on failure.
 */
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
		phys_addr_t phys_size,
		dma_addr_t dma_size)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!phys_size || phys_size > dma_size);
	GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));

	/* obj->base.size may be narrower than dma_addr_t; reject overflow. */
	if (overflows_type(dma_size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* The object's nominal size is the large (virtual) dma_size. */
	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
	i915_gem_object_init(obj, &huge_ops);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);
	/* Record the real backing size for huge_get_pages()/free_pages(). */
	obj->scratch = phys_size;

	return obj;
}