/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

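/*
 * TTM destroy callback for a nouveau buffer object: drops any kernel
 * mapping, warns if a GEM object still references the BO, unlinks it from
 * the device-wide BO list and frees it.
 */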
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        ttm_bo_kunmap(&nvbo->kmap);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_del(&nvbo->head);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        kfree(nvbo);
}

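/*
 * Allocate and initialise a new nouveau buffer object.  "size" and "align"
 * are rounded up below for tiled layouts and NV50 VRAM constraints; "flags"
 * selects the initial TTM placement (VRAM and/or TT).  The BO is handed to
 * ttm_bo_init(), which owns cleanup via nouveau_bo_del_ttm() on failure.
 */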
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret, n = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->no_vm = no_vm;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;

        /*
         * Some of the tile_flags have a periodic structure of N*4096 bytes,
         * align to that as well as the page size.  Overallocate memory to
         * avoid corruption of other buffer objects.
         */
        switch (tile_flags) {
        case 0x1800:
        case 0x2800:
        case 0x4800:
        case 0x7a00:
                if (dev_priv->chipset >= 0xA0) {
                        /* This is based on high end cards with 448 bits
                         * memory bus, could be different elsewhere. */
                        size += 6 * 28672;
                        /* 8 * 28672 is the actual alignment requirement,
                         * but we must also align to page size. */
                        align = 2 * 8 * 28672;
                } else if (dev_priv->chipset >= 0x90) {
                        size += 3 * 16384;
                        align = 12 * 16384;
                } else {
                        size += 3 * 8192;
                        /* 12 * 8192 is the actual alignment requirement,
                         * but we must also align to page size. */
                        align = 2 * 12 * 8192;
                }
                break;
        default:
                break;
        }

        align >>= PAGE_SHIFT;

        size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
        if (dev_priv->card_type == NV_50) {
                size = (size + 65535) & ~65535;
                if (align < (65536 / PAGE_SIZE))
                        align = (65536 / PAGE_SIZE);
        }

        if (flags & TTM_PL_FLAG_VRAM)
                nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
        if (flags & TTM_PL_FLAG_TT)
                nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
        nvbo->placement.placement = nvbo->placements;
        nvbo->placement.busy_placement = nvbo->placements;
        nvbo->placement.num_placement = n;
        nvbo->placement.num_busy_placement = n;

        nvbo->channel = chan;
        nouveau_bo_placement_set(nvbo, flags);
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        nvbo->channel = NULL;
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        *pnvbo = nvbo;
        return 0;
}

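/*
 * A minimal usage sketch for the helpers in this file (illustrative only,
 * not taken from this driver; the sizes, VRAM placement and the scratch
 * write are assumptions):
 *
 *      struct nouveau_bo *nvbo = NULL;
 *      int ret;
 *
 *      ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
 *                           0, 0, false, true, &nvbo);
 *      if (ret == 0 &&
 *          nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM) == 0 &&
 *          nouveau_bo_map(nvbo) == 0) {
 *              nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *              nouveau_bo_unmap(nvbo);
 *              nouveau_bo_unpin(nvbo);
 *      }
 */
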
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
{
        int n = 0;

        if (memtype & TTM_PL_FLAG_VRAM)
                nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_TT)
                nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_SYSTEM)
                nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM |
                                        TTM_PL_MASK_CACHING;
        nvbo->placement.placement = nvbo->placements;
        nvbo->placement.busy_placement = nvbo->placements;
        nvbo->placement.num_placement = n;
        nvbo->placement.num_busy_placement = n;
}

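/*
 * Pin a buffer object into the memory type given by "memtype".  Pinning is
 * reference counted; only the first pin validates the BO with
 * TTM_PL_FLAG_NO_EVICT set, and a request that conflicts with where the BO
 * is already pinned fails with -EINVAL.
 */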
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype);
        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

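/*
 * Drop one pin reference.  The BO only becomes evictable again (NO_EVICT
 * cleared and revalidated) when the last reference goes away, at which
 * point the aperture-free accounting taken in nouveau_bo_pin() is restored.
 */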
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

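/*
 * Kernel mapping helpers: nouveau_bo_map() kmaps the whole BO through TTM
 * (the mapping is cached in nvbo->kmap and also torn down by
 * nouveau_bo_del_ttm()); nouveau_bo_unmap() releases it.
 */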
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        ttm_bo_kunmap(&nvbo->kmap);
}

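/*
 * CPU accessors for a kmapped BO.  ttm_kmap_obj_virtual() reports whether
 * the mapping is I/O memory (e.g. VRAM through the BAR) or ordinary kernel
 * memory, so accesses dispatch to io{read,write}{16,32}_native() or plain
 * pointer dereference accordingly.  "index" is in units of the access size.
 */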
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

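/*
 * Create the TTM backend matching the GART type probed at load time:
 * AGP (when built with AGP support) or the PCI(E) SGDMA path.
 */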
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

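/*
 * Describe each TTM memory type to the core: system RAM, VRAM (a fixed,
 * mappable region behind PCI BAR 1, clamped to the actual amount of
 * framebuffer memory) and TT, whose caching and mapping flags depend on
 * whether the GART is AGP or SGDMA.
 */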
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE |
                             TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;

                man->io_addr = NULL;
                man->io_offset = drm_get_resource_start(dev, 1);
                man->io_size = drm_get_resource_len(dev, 1);
                if (man->io_size > nouveau_mem_fb_amount(dev))
                        man->io_size = nouveau_mem_fb_amount(dev);

                man->gpu_offset = dev_priv->vm_vram_base;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }

                man->io_offset = dev_priv->gart_info.aper_base;
                man->io_size = dev_priv->gart_info.aper_size;
                man->io_addr = NULL;
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

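/*
 * Eviction placements: VRAM contents may be evicted to TT or system
 * memory; anything else falls back to system memory only.
 */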
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait, struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
                                        evict, no_wait, new_mem);
        nouveau_fence_unref((void *)&fence);
        return ret;
}

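/*
 * Pick the DMA context handle the copy channel should use for a memory
 * region: the kernel channel uses the global NvDmaGART/NvDmaVRAM objects,
 * user channels use their own per-channel GART/VRAM handles.
 */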
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
                      struct ttm_mem_reg *mem)
{
        if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
        }

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

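/*
 * Copy a BO between placements with the M2MF engine.  The transfer is cut
 * into chunks of at most 2047 lines of PAGE_SIZE bytes each; on NV50 the
 * linear modes are enabled and the upper 32 bits of the source/destination
 * addresses are programmed per chunk.  A fence emitted by
 * nouveau_bo_move_accel_cleanup() signals completion to TTM.
 */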
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
                     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan;
        uint64_t src_offset, dst_offset;
        uint32_t page_count;
        int ret;

        chan = nvbo->channel;
        if (!chan || nvbo->tile_flags || nvbo->no_vm) {
                chan = dev_priv->channel;
                if (!chan)
                        return -EINVAL;
        }

        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        if (chan != dev_priv->channel) {
                if (old_mem->mem_type == TTM_PL_TT)
                        src_offset += dev_priv->vm_gart_base;
                else
                        src_offset += dev_priv->vm_vram_base;

                if (new_mem->mem_type == TTM_PL_TT)
                        dst_offset += dev_priv->vm_gart_base;
                else
                        dst_offset += dev_priv->vm_vram_base;
        }

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

        if (dev_priv->card_type >= NV_50) {
                ret = RING_SPACE(chan, 4);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                OUT_RING(chan, 1);
                BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                OUT_RING(chan, 1);
        }

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                if (dev_priv->card_type >= NV_50) {
                        ret = RING_SPACE(chan, 3);
                        if (ret)
                                return ret;
                        BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                        OUT_RING(chan, upper_32_bits(src_offset));
                        OUT_RING(chan, upper_32_bits(dst_offset));
                }
                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING(chan, lower_32_bits(src_offset));
                OUT_RING(chan, lower_32_bits(dst_offset));
                OUT_RING(chan, PAGE_SIZE); /* src_pitch */
                OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING(chan, PAGE_SIZE); /* line_length */
                OUT_RING(chan, line_count);
                OUT_RING(chan, (1 << 8) | (1 << 0));
                OUT_RING(chan, 0);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING(chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait,
                                             new_mem);
}

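/*
 * M2MF cannot reach TTM_PL_SYSTEM pages, so moves to/from system memory go
 * through a temporary TT placement: "flipd" copies VRAM->TT with the GPU
 * and lets TTM finish TT->SYSTEM, while "flips" is the mirror image for
 * the opposite direction.
 */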
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
        if (ret)
                goto out;

out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

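/*
 * Top-level TTM move hook.  NV50 VRAM destinations get their linear VM
 * mapping bound first; before card init, and whenever an accelerated path
 * fails, the move falls back to ttm_bo_move_memcpy().
 */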
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_device *dev = dev_priv->dev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
            !nvbo->no_vm) {
                uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;

                ret = nv50_mem_vm_bind_linear(dev,
                                              offset + dev_priv->vm_vram_base,
                                              new_mem->size, nvbo->tile_flags,
                                              offset);
                if (ret)
                        return ret;
        }

        if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
                return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                return 0;
        }

        if (new_mem->mem_type == TTM_PL_SYSTEM) {
                if (old_mem->mem_type == TTM_PL_SYSTEM)
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
                if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
                if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else {
                if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
                        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }

        return 0;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

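/*
 * TTM driver vtable for nouveau.  The sync_obj hooks plug the nouveau
 * fence primitives in as TTM's synchronisation objects, which is what
 * lets ttm_bo_move_accel_cleanup() wait on in-flight M2MF copies above.
 */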
struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_fence_signalled,
        .sync_obj_wait = nouveau_fence_wait,
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
};