#include <drm/drmP.h>
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
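/*
 * Note: the GPU's GART page tables always use 4KiB pages
 * (NV_CTXDMA_PAGE_SHIFT), regardless of the CPU's PAGE_SIZE; the NV04
 * bind/unbind paths below therefore write PAGE_SIZE / NV_CTXDMA_PAGE_SIZE
 * PTEs per CPU page to cover the mismatch.
 */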
struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise we have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;
	u64 offset;
};

static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		NV_DEBUG(nvbe->dev, "\n");
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

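/*
 * NV04-class GART: the page table is a linear ctxdma object of 32-bit
 * PTEs.  Entries start at word 2 because words 0-1 hold the DMA object
 * header written by nouveau_sgdma_init().  Each PTE is the bus address
 * of a 4KiB page ORed with 3 (the low bits are assumed to be valid/RW
 * flags); only the lower 32 bits of the DMA address are used, so this
 * path assumes a 32-bit-addressable aperture.
 */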
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
		uint32_t offset_l = lower_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
			offset_l += NV_CTXDMA_PAGE_SIZE;
		}
	}

	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (ttm->state != tt_bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < ttm->num_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

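/*
 * NV41-class VM flush.  The MMIO register semantics are inferred from
 * usage here: writing 0x22 to 0x100810 presumably kicks a TLB flush,
 * and completion is signalled by bit 8 of the same register.
 */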
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100810, 0x00000022);
	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100810));
	nv_wr32(dev, 0x100810, 0x00000000);
}

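/*
 * NV41 PTE format, as used below: bit 0 is assumed to be the "present"
 * flag, and the DMA address is stored shifted right by 7, i.e. the PTE
 * packs address bits 38:12 into bits 31:5.
 */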
static int
nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->ttm.dma_address;
	u32 pte = mem->start << 2;
	u32 cnt = ttm->num_pages;

	nvbe->offset = mem->start << PAGE_SHIFT;

	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	return 0;
}

static int
nv41_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = ttm->num_pages;

	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
	.bind = nv41_sgdma_bind,
	.unbind = nv41_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

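/*
 * NV44 GART flush handshake (register meanings inferred from this
 * code): 0x100814 is written with the offset of the last flushed page
 * ((num_pages - 1) << 12) and 0x100808 with the base offset ORed with
 * 0x20; completion is polled on bit 0 of 0x100808.
 */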
static void
nv44_sgdma_flush(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100808));
	nv_wr32(dev, 0x100808, 0x00000000);
}

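/*
 * NV44 packs four 27-bit PTEs (bus address >> 12) into each 16-byte
 * group of the page table:
 *
 *   word0[26:0]  = pte0
 *   word0[31:27] = pte1[4:0],  word1[21:0] = pte1[26:5]
 *   word1[31:22] = pte2[9:0],  word2[16:0] = pte2[26:10]
 *   word2[31:17] = pte3[14:0], word3[11:0] = pte3[26:15]
 *
 * Bit 30 of word3 is always set on writeback; it is assumed to mark
 * the group valid.  nv44_sgdma_fill() updates a partial group with a
 * read-modify-write, pointing unused entries at the dummy page since
 * NV44 PTEs lack a usable present bit (see nouveau_sgdma_init()).
 */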
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
	u32 pte, tmp[4];

	pte   = base >> 2;
	base &= ~0x0000000f;

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	tmp[3] |= 0x40000000;

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3]);
}

static int
nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->ttm.dma_address;
	u32 pte = mem->start << 2, tmp[4];
	u32 cnt = ttm->num_pages;
	int i;

	nvbe->offset = mem->start << PAGE_SHIFT;

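	/* Write in three phases: a read-modify-write fill for a head that
	 * does not start on a 16-byte PTE group, whole groups written
	 * directly, then a fill for any partial tail group. */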
	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, list, pte, part);
		pte += (part << 2);
		list += part;
		cnt -= part;
	}

	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
		nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);

	nv44_sgdma_flush(ttm);
	return 0;
}

static int
nv44_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = ttm->num_pages;

	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, NULL, pte, part);
		pte += (part << 2);
		cnt -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte + 0x0, 0x00000000);
		nv_wo32(pgt, pte + 0x4, 0x00000000);
		nv_wo32(pgt, pte + 0x8, 0x00000000);
		nv_wo32(pgt, pte + 0xc, 0x00000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);

	nv44_sgdma_flush(ttm);
	return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
	.bind = nv44_sgdma_bind,
	.unbind = nv44_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

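/*
 * On NV50+ the GART is managed through the VM code, and the actual
 * (un)binding happens from nouveau_bo's move_notify() hook.  bind here
 * only records where the backing pages live so the VM code can map
 * them later; ttm->sg is set for scatterlist-backed objects
 * (presumably dma-buf imports), which pass their sg table through
 * instead of the dma_address array.
 */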
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg)
		node->sg = ttm->sg;
	else
		node->pages = nvbe->ttm.dma_address;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind = nv50_sgdma_bind,
	.unbind = nv50_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

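/*
 * Constructor for the TTM backend: allocates the nouveau_sgdma_be
 * wrapper (whose first member must be the ttm_dma_tt, see the struct
 * comment above) and hooks up whichever ttm_backend_func
 * nouveau_sgdma_init() selected for this chipset.
 */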
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;
	nvbe->ttm.ttm.func = dev_priv->gart_info.func;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(nvbe);
		return NULL;
	}
	return &nvbe->ttm.ttm;
}

int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	u32 aper_size, align;
	int ret;

	if (dev_priv->card_type >= NV_40)
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 128 * 1024 * 1024;

	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
	 * christmas.  The cards before it have them, the cards after
	 * it have them, why is NV44 so unloved?
	 */
	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
	if (!dev_priv->gart_info.dummy.page)
		return -ENOMEM;

	dev_priv->gart_info.dummy.addr =
		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
		NV_ERROR(dev, "error mapping dummy page\n");
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
		return -ENOMEM;
	}

	if (dev_priv->card_type >= NV_50) {
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
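	/* note: the NV41/NV44 PCIE GART path below is deliberately
	 * disabled ("0 &&"), so these chips currently fall through to
	 * the NV04-style PCI ctxdma as well */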
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
		} else {
			dev_priv->gart_info.func = &nv41_sgdma_backend;
			align = 16;
		}

		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
	} else {
		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
		dev_priv->gart_info.func = &nv04_sgdma_backend;
	}

	return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

	if (dev_priv->gart_info.dummy.page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
	}
}

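/*
 * Translate a GART offset back into the bus address it maps to by
 * reading the PTE out of the NV04-style ctxdma (hence the +2 header
 * skip, and the BUG_ON: NV50+ GARTs are handled by the VM code
 * instead).
 */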
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}