#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
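
/* Per-backend scatter/gather state.  "pages" holds one bus address per
 * backing page; "ttm_alloced" records, per page, whether the DMA mapping
 * was supplied by TTM (and is owned by it) or was created here with
 * pci_map_page() and must be unmapped again in clear().
 */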
struct nouveau_sgdma_be {
        struct ttm_backend backend;
        struct drm_device *dev;

        dma_addr_t *pages;
        bool *ttm_alloced;
        unsigned nr_pages;

        u64 offset;
        bool bound;
};
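
/* Build the bus-address list for a buffer's backing pages.  Addresses
 * already mapped by TTM (dma_addrs[] != DMA_ERROR_CODE) are reused as-is;
 * anything else gets mapped through pci_map_page() here.
 */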
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                       struct page **pages, struct page *dummy_read_page,
                       dma_addr_t *dma_addrs)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;

        NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

        if (nvbe->pages)
                return -EINVAL;

        nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
        if (!nvbe->pages)
                return -ENOMEM;

        nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
        if (!nvbe->ttm_alloced) {
                kfree(nvbe->pages);
                nvbe->pages = NULL;
                return -ENOMEM;
        }

        nvbe->nr_pages = 0;
        while (num_pages--) {
                if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
                        nvbe->pages[nvbe->nr_pages] =
                                        dma_addrs[nvbe->nr_pages];
                        nvbe->ttm_alloced[nvbe->nr_pages] = true;
                } else {
                        nvbe->pages[nvbe->nr_pages] =
                                pci_map_page(dev->pdev, pages[nvbe->nr_pages],
                                             0, PAGE_SIZE,
                                             PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(dev->pdev,
                                                  nvbe->pages[nvbe->nr_pages])) {
                                be->func->clear(be);
                                return -EFAULT;
                        }
                        nvbe->ttm_alloced[nvbe->nr_pages] = false;
                }

                nvbe->nr_pages++;
        }

        return 0;
}
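
/* Undo populate(): unbind if still bound, drop only the DMA mappings we
 * created ourselves, and free the bookkeeping arrays.
 */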
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev;

        if (nvbe && nvbe->pages) {
                dev = nvbe->dev;
                NV_DEBUG(dev, "\n");

                if (nvbe->bound)
                        be->func->unbind(be);

                while (nvbe->nr_pages--) {
                        if (!nvbe->ttm_alloced[nvbe->nr_pages])
                                pci_unmap_page(dev->pdev,
                                               nvbe->pages[nvbe->nr_pages],
                                               PAGE_SIZE,
                                               PCI_DMA_BIDIRECTIONAL);
                }
                kfree(nvbe->pages);
                kfree(nvbe->ttm_alloced);
                nvbe->pages = NULL;
                nvbe->ttm_alloced = NULL;
                nvbe->nr_pages = 0;
        }
}
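
/* Final teardown: clear any remaining mappings, then free the backend. */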
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

        if (nvbe) {
                NV_DEBUG(nvbe->dev, "\n");

                if (nvbe->pages)
                        be->func->clear(be);
                kfree(nvbe);
        }
}
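
/* NV04-style PDMA: write one 32-bit PTE per 4KiB sub-page into the sg
 * ctxdma object.  The two-word object header sits in front of the page
 * table, hence the "+ 2" on the first PTE index; the low bits (0x3)
 * presumably mark the entry present/valid.
 */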
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        nv_wo32(gpuobj, (pte * 4) + 0,
                                lower_32_bits(dma_offset) | 3);
                        dma_offset += NV_CTXDMA_PAGE_SIZE;
                }
        }

        nvbe->bound = true;
        return 0;
}
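
/* Clear the PTEs written by nv04_sgdma_bind(). */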
static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "\n");

        if (!nvbe->bound)
                return 0;

        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }

        nvbe->bound = false;
        return 0;
}
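
/* Backend vtable for NV04-class PDMA GARTs. */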
static struct ttm_backend_func nv04_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv04_sgdma_bind,
        .unbind   = nv04_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};
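
/* Kick the VM flush register (0x100810) and wait for the done bit, so the
 * hardware picks up page-table updates.
 */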
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100810, 0x00000022);
        if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
                NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100810));
        nv_wr32(dev, 0x100810, 0x00000000);
}
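
/* NV41-style GART: one 32-bit PTE per page, holding the bus address
 * shifted right by 7, with bit 0 apparently serving as the valid bit.
 */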
static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->pages;
        u32 pte = mem->start << 2;
        u32 cnt = nvbe->nr_pages;

        nvbe->offset = mem->start << PAGE_SHIFT;

        while (cnt--) {
                nv_wo32(pgt, pte, (*list++ >> 7) | 1);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        nvbe->bound = true;
        return 0;
}
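
/* Zero this object's PTEs and flush. */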
static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = nvbe->nr_pages;

        while (cnt--) {
                nv_wo32(pgt, pte, 0x00000000);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        nvbe->bound = false;
        return 0;
}
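
/* Backend vtable for NV41-class GARTs. */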
static struct ttm_backend_func nv41_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv41_sgdma_bind,
        .unbind   = nv41_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};
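
/* Flush the NV44 GART TLB for this object's range via the 0x100814
 * (length) and 0x100808 (offset/trigger) registers.
 */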
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
        nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
        if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
                NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100808));
        nv_wr32(dev, 0x100808, 0x00000000);
}
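
/* NV44 packs four 27-bit page-frame numbers into each 16-byte group of
 * four 32-bit words, so individual PTEs straddle word boundaries.  This
 * helper read-modify-writes one such group, updating "cnt" entries
 * starting at byte offset "base"; with list == NULL the entries are
 * pointed at the dummy page instead, since NV44 PTEs have no present bit
 * to clear (see the rant in nouveau_sgdma_init()).
 */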
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
        dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
        u32 pte, tmp[4];

        pte   = base >> 2;
        base &= ~0x0000000f;

        tmp[0] = nv_ro32(pgt, base + 0x0);
        tmp[1] = nv_ro32(pgt, base + 0x4);
        tmp[2] = nv_ro32(pgt, base + 0x8);
        tmp[3] = nv_ro32(pgt, base + 0xc);
        while (cnt--) {
                u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
                switch (pte++ & 0x3) {
                case 0:
                        tmp[0] &= ~0x07ffffff;
                        tmp[0] |= addr;
                        break;
                case 1:
                        tmp[0] &= ~0xf8000000;
                        tmp[0] |= addr << 27;
                        tmp[1] &= ~0x003fffff;
                        tmp[1] |= addr >> 5;
                        break;
                case 2:
                        tmp[1] &= ~0xffc00000;
                        tmp[1] |= addr << 22;
                        tmp[2] &= ~0x0001ffff;
                        tmp[2] |= addr >> 10;
                        break;
                case 3:
                        tmp[2] &= ~0xfffe0000;
                        tmp[2] |= addr << 17;
                        tmp[3] &= ~0x00000fff;
                        tmp[3] |= addr >> 15;
                        break;
                }
        }

        tmp[3] |= 0x40000000;

        nv_wo32(pgt, base + 0x0, tmp[0]);
        nv_wo32(pgt, base + 0x4, tmp[1]);
        nv_wo32(pgt, base + 0x8, tmp[2]);
        nv_wo32(pgt, base + 0xc, tmp[3]);
}
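
/* Write PTEs for the buffer: a misaligned head is handled read-modify-write
 * via nv44_sgdma_fill(), full four-entry groups are packed and written
 * directly, and any tail goes through the fill helper again.
 */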
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->pages;
        u32 pte = mem->start << 2, tmp[4];
        u32 cnt = nvbe->nr_pages;
        int i;

        nvbe->offset = mem->start << PAGE_SHIFT;

        if (pte & 0x0000000c) {
                u32 max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, list, pte, part);
                pte += (part << 2);
                list += part;
                cnt -= part;
        }

        while (cnt >= 4) {
                for (i = 0; i < 4; i++)
                        tmp[i] = *list++ >> 12;
                nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
                nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
                nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
                nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
                pte += 0x10;
                cnt -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, list, pte, cnt);

        nv44_sgdma_flush(nvbe);
        nvbe->bound = true;
        return 0;
}
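
/* Reverse of nv44_sgdma_bind(): partial groups are redirected to the dummy
 * page, full groups are simply zeroed.
 */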
static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = nvbe->nr_pages;

        if (pte & 0x0000000c) {
                u32 max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, NULL, pte, part);
                pte += (part << 2);
                cnt -= part;
        }

        while (cnt >= 4) {
                nv_wo32(pgt, pte + 0x0, 0x00000000);
                nv_wo32(pgt, pte + 0x4, 0x00000000);
                nv_wo32(pgt, pte + 0x8, 0x00000000);
                nv_wo32(pgt, pte + 0xc, 0x00000000);
                pte += 0x10;
                cnt -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, NULL, pte, cnt);

        nv44_sgdma_flush(nvbe);
        nvbe->bound = false;
        return 0;
}
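
/* Backend vtable for NV44-class GARTs. */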
static struct ttm_backend_func nv44_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv44_sgdma_bind,
        .unbind   = nv44_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};
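
/* NV50+ has a real per-channel VM; binding is just a map into the GART
 * VMA reserved at init time.
 */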
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

        nvbe->offset = mem->start << PAGE_SHIFT;

        nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
                          nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
        nvbe->bound = true;
        return 0;
}
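
/* Unmap the pages from the GART VMA. */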
static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

        if (!nvbe->bound)
                return 0;

        nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
                            nvbe->nr_pages << PAGE_SHIFT);
        nvbe->bound = false;
        return 0;
}
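
/* Backend vtable for NV50+ VM-based GARTs. */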
static struct ttm_backend_func nv50_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv50_sgdma_bind,
        .unbind   = nv50_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};
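
/* Allocate a TTM backend wired up to whichever vtable nouveau_sgdma_init()
 * selected for this chipset.
 */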
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;

        nvbe->backend.func = dev_priv->gart_info.func;
        return &nvbe->backend;
}
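
/* Pick a GART implementation for the chipset, size the aperture, and set
 * up the shared state: a dummy page for NV44, a VM range on NV50+, or a
 * page-table gpuobj on everything else.
 */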
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        u32 aper_size, align;
        int ret;

        if (dev_priv->card_type >= NV_50 ||
            dev_priv->ramin_rsvd_vram >= 2 * 1024 * 1024)
                aper_size = 512 * 1024 * 1024;
        else
                aper_size = 64 * 1024 * 1024;

        /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
         * christmas.  The cards before it have them, the cards after
         * it have them, why is NV44 so unloved?
         */
        dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
        if (!dev_priv->gart_info.dummy.page)
                return -ENOMEM;

        dev_priv->gart_info.dummy.addr =
                pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
                             0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
                NV_ERROR(dev, "error mapping dummy page\n");
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
                return -ENOMEM;
        }

        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_vm_get(dev_priv->chan_vm, aper_size,
                                     12, NV_MEM_ACCESS_RW,
                                     &dev_priv->gart_info.vma);
                if (ret)
                        return ret;

                dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
                dev_priv->gart_info.func = &nv50_sgdma_backend;
        } else
        if (drm_pci_device_is_pcie(dev) &&
            dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
                if (nv44_graph_class(dev)) {
                        dev_priv->gart_info.func = &nv44_sgdma_backend;
                        align = 512 * 1024;
                } else {
                        dev_priv->gart_info.func = &nv41_sgdma_backend;
                        align = 16;
                }

                ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
        } else {
                ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
                                   (0 << 14) /* RW */ |
                                   (2 << 16) /* PCI */);
                nv_wo32(gpuobj, 4, aper_size - 1);

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
                dev_priv->gart_info.func = &nv04_sgdma_backend;
        }

        return 0;
}
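
/* Release everything nouveau_sgdma_init() set up. */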
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
        nouveau_vm_put(&dev_priv->gart_info.vma);

        if (dev_priv->gart_info.dummy.page) {
                pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
        }
}
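
/* Translate a GART offset back to the bus address stored in the NV04
 * ctxdma page table (pre-NV50 only).
 */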
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

        BUG_ON(dev_priv->card_type >= NV_50);

        return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
                (offset & NV_CTXDMA_PAGE_MASK);
}