drm/i915: Store a i915 backpointer from engine, and use it
[linux-block.git] / drivers / gpu / drm / i915 / i915_gem_render_state.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Mika Kuoppala <mika.kuoppala@intel.com>
25  *
26  */
27
28 #include "i915_drv.h"
29 #include "intel_renderstate.h"
30
31 static const struct intel_renderstate_rodata *
32 render_state_get_rodata(const int gen)
33 {
34         switch (gen) {
35         case 6:
36                 return &gen6_null_state;
37         case 7:
38                 return &gen7_null_state;
39         case 8:
40                 return &gen8_null_state;
41         case 9:
42                 return &gen9_null_state;
43         }
44
45         return NULL;
46 }
47
48 static int render_state_init(struct render_state *so,
49                              struct drm_i915_private *dev_priv)
50 {
51         int ret;
52
53         so->gen = INTEL_GEN(dev_priv);
54         so->rodata = render_state_get_rodata(so->gen);
55         if (so->rodata == NULL)
56                 return 0;
57
58         if (so->rodata->batch_items * 4 > 4096)
59                 return -EINVAL;
60
61         so->obj = i915_gem_object_create(dev_priv->dev, 4096);
62         if (IS_ERR(so->obj))
63                 return PTR_ERR(so->obj);
64
65         ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
66         if (ret)
67                 goto free_gem;
68
69         so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
70         return 0;
71
72 free_gem:
73         drm_gem_object_unreference(&so->obj->base);
74         return ret;
75 }
76
77 /*
78  * Macro to add commands to auxiliary batch.
79  * This macro only checks for page overflow before inserting the commands,
80  * this is sufficient as the null state generator makes the final batch
81  * with two passes to build command and state separately. At this point
82  * the size of both are known and it compacts them by relocating the state
 83  * right after the commands taking care of alignment so we should have
 84  * sufficient space below them for adding new commands.
85  */
/*
 * Emit one dword into the auxiliary batch, bounds-checked against the
 * single page backing it. Expands in a context providing a local `ret`
 * and an `err_out` label (see render_state_setup()).
 */
#define OUT_BATCH(batch, i, val)				\
	do {							\
		if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) {	\
			ret = -ENOSPC;				\
			goto err_out;				\
		}						\
		(batch)[(i)++] = (val);				\
	} while (0)
94
/*
 * Copy the null-state batch into the pinned object, resolving the
 * relocations recorded in rodata->reloc against the object's GGTT
 * offset, then pad to a cacheline and terminate the auxiliary batch.
 *
 * Returns 0 on success or a negative errno (-EINVAL on a malformed
 * batch or unresolved relocations, or an error from the domain moves).
 */
static int render_state_setup(struct render_state *so)
{
	const struct intel_renderstate_rodata *rodata = so->rodata;
	unsigned int i = 0, reloc_index = 0;
	struct page *page;
	u32 *d;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
	if (ret)
		return ret;

	page = i915_gem_object_get_dirty_page(so->obj, 0);
	d = kmap(page);

	while (i < rodata->batch_items) {
		u32 s = rodata->batch[i];

		/* Is the current dword the target of the next relocation? */
		if (i * 4  == rodata->reloc[reloc_index]) {
			u64 r = s + so->ggtt_offset;
			s = lower_32_bits(r);
			if (so->gen >= 8) {
				/*
				 * Gen8+ uses 64b addresses: the generator
				 * must have left a zero upper dword for us
				 * to fill in, and it must fit in the batch.
				 */
				if (i + 1 >= rodata->batch_items ||
				    rodata->batch[i + 1] != 0) {
					ret = -EINVAL;
					goto err_out;
				}

				d[i++] = s;
				s = upper_32_bits(r);
			}

			reloc_index++;
		}

		d[i++] = s;
	}

	/* Pad with NOOPs so the auxiliary batch starts on a cacheline. */
	while (i % CACHELINE_DWORDS)
		OUT_BATCH(d, i, MI_NOOP);

	so->aux_batch_offset = i * sizeof(u32);

	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;

	/*
	 * Since we are sending length, we need to strictly conform to
	 * all requirements. For Gen2 this must be a multiple of 8.
	 */
	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);

	kunmap(page);

	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
	if (ret)
		return ret;

	/* The reloc table is -1 terminated; every entry must be consumed. */
	if (rodata->reloc[reloc_index] != -1) {
		DRM_ERROR("only %d relocs resolved\n", reloc_index);
		return -EINVAL;
	}

	return 0;

err_out:
	kunmap(page);
	return ret;
}
164
165 #undef OUT_BATCH
166
/*
 * Release the null-state object set up by i915_gem_render_state_prepare():
 * drop the GGTT pin taken in render_state_init(), then the GEM reference.
 */
void i915_gem_render_state_fini(struct render_state *so)
{
	i915_gem_object_ggtt_unpin(so->obj);
	drm_gem_object_unreference(&so->obj->base);
}
172
173 int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
174                                   struct render_state *so)
175 {
176         int ret;
177
178         if (WARN_ON(engine->id != RCS))
179                 return -ENOENT;
180
181         ret = render_state_init(so, engine->i915);
182         if (ret)
183                 return ret;
184
185         if (so->rodata == NULL)
186                 return 0;
187
188         ret = render_state_setup(so);
189         if (ret) {
190                 i915_gem_render_state_fini(so);
191                 return ret;
192         }
193
194         return 0;
195 }
196
197 int i915_gem_render_state_init(struct drm_i915_gem_request *req)
198 {
199         struct render_state so;
200         int ret;
201
202         ret = i915_gem_render_state_prepare(req->engine, &so);
203         if (ret)
204                 return ret;
205
206         if (so.rodata == NULL)
207                 return 0;
208
209         ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
210                                              so.rodata->batch_items * 4,
211                                              I915_DISPATCH_SECURE);
212         if (ret)
213                 goto out;
214
215         if (so.aux_batch_size > 8) {
216                 ret = req->engine->dispatch_execbuffer(req,
217                                                      (so.ggtt_offset +
218                                                       so.aux_batch_offset),
219                                                      so.aux_batch_size,
220                                                      I915_DISPATCH_SECURE);
221                 if (ret)
222                         goto out;
223         }
224
225         i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
226
227 out:
228         i915_gem_render_state_fini(&so);
229         return ret;
230 }