/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/dma-buf.h>
#include <drm/drmP.h>

#include "nouveau_drv.h"
#include <drm/nouveau_drm.h>
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#define nouveau_gem_pushbuf_sync(chan) 0

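/* GEM object initialisation hook; all real setup happens in nouveau_gem_new() */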
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

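/*
 * GEM object destructor: unpin a still-pinned buffer, tear down any PRIME
 * import attachment, then drop the TTM buffer object reference before
 * releasing the GEM object itself.
 */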
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;
	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

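/*
 * Called for each client that opens a handle to this object.  On chipsets
 * with per-client virtual memory, find the VMA mapping the buffer into the
 * client's address space and take a reference, creating it if necessary.
 */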
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

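/*
 * Counterpart to nouveau_gem_object_open(): drop the client's reference on
 * the VMA, and tear the mapping down once the last reference is gone.
 */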
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

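/*
 * Allocate a buffer object with the requested size/alignment/tiling in the
 * given memory domain(s), and wrap it in a GEM object.
 */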
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without breaking the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (dev_priv->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

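/*
 * Fill in a drm_nouveau_gem_info struct for userspace: current memory
 * domain, GPU offset (the per-client virtual address where one exists),
 * size, mmap handle and tiling state.
 */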
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (fpriv->vm) {
		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

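/* DRM_IOCTL_NOUVEAU_GEM_NEW: allocate a buffer object and return a handle to it */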
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

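/*
 * Derive TTM placement flags from the domains userspace requested,
 * preferring whichever valid domain the buffer already resides in so that
 * validation does not migrate it needlessly.
 */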
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;
	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;
	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;
	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

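/*
 * State for one validation pass: reserved buffers are kept on one of three
 * lists according to the memory domains they are allowed to reside in.
 */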
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

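/*
 * Undo a validation pass: attach the submission's fence (if any) to each
 * buffer, unmap any kmap set up for relocations, then unreserve and drop
 * the lookup reference.
 */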
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

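/*
 * Look up and reserve every buffer in the submission.  If a reservation
 * would deadlock against a concurrent submission, all reservations taken
 * so far are backed off, we wait for the contended buffer, and the whole
 * list is retried.
 */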
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}

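/*
 * Make the channel synchronise with any fence still attached to the
 * buffer, ordering this submission against the buffer's previous users.
 */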
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

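/*
 * Validate every buffer on one list: sync to its previous user, apply the
 * requested domains, let TTM place the buffer, then sync again in case
 * validation queued a move.  On pre-NV50 chips, buffers that no longer
 * match userspace's presumed offset/domain are reported back so the
 * relocations naming them get applied; returns the number of such buffers
 * or a negative error code.
 */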
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (dev_priv->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

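/*
 * Reserve and validate all three buffer lists for a submission, totalling
 * the number of buffers whose presumed addresses turned out stale and so
 * still need relocations applied.
 */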
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

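/*
 * Copy an array of nmemb size-byte elements in from userspace; callers cap
 * nmemb against the NOUVEAU_GEM_MAX_* limits before calling.
 */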
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

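/*
 * Patch relocations into buffers whose presumed addresses were stale: for
 * each reloc, compute the new value (low or high half of the target's
 * offset, optionally OR'd with domain-dependent bits), wait for the
 * containing buffer to idle, and write the value through a CPU mapping.
 */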
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

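/*
 * DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: the command submission ioctl.  Copies in
 * the push/buffer/reloc arrays, validates every buffer, applies any
 * required relocations, then queues the push buffers on the channel (via
 * the IB ring where the channel has one, otherwise with call commands on
 * chipset >= 0x25 or jump commands on older parts) and finally fences the
 * submission.
 */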
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push)) {
		nouveau_channel_put(&chan);
		return PTR_ERR(push);
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		nouveau_channel_put(&chan);
		return PTR_ERR(bo);
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else if (dev_priv->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else if (dev_priv->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			       (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	nouveau_channel_put(&chan);
	return ret;
}

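/* Translate NOUVEAU_GEM_DOMAIN_* flags into their TTM placement equivalents. */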
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

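/*
 * DRM_IOCTL_NOUVEAU_GEM_CPU_PREP: wait for a buffer to go idle before CPU
 * access (or just poll its state when NOUVEAU_GEM_CPU_PREP_NOWAIT is set).
 */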
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}