/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			       struct ttm_resource *reg);
static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->bo.pin_count > 0);
	nouveau_bo_del_io_reserve_lru(bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/*
	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
	 * initialized, so don't attempt to release it.
	 */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);

	kfree(nvbo);
}

static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}
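
/*
 * Example: roundup_64(0x3f01, 0x1000) == 0x4000.  do_div() is used rather
 * than the '/' operator so the u64 division also links on 32-bit kernels;
 * it divides x in place and returns the remainder, which is ignored here.
 */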

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}
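
/*
 * Worked example for the pre-Tesla path above, assuming chipset >= 0x40
 * and a tile mode of 4: *align becomes 65536 and a 100000-byte request is
 * rounded up to a multiple of 64 * 4 = 256 bytes (100096), then up to a
 * whole number of CPU pages (102400 with 4 KiB pages).
 */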

struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
		 u32 tile_mode, u32 tile_flags)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
	int i, pi = -1;

	if (!*size) {
		NV_WARN(drm, "skipped size %016llx\n", *size);
		return ERR_PTR(-EINVAL);
	}

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
			continue;
		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (*size >= 1ULL << vmm->page[i].shift)
			break;
	}

	if (WARN_ON(pi < 0)) {
		kfree(nvbo);
		return ERR_PTR(-EINVAL);
	}

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, align, size);

	return nvbo;
}

int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
		struct sg_table *sg, struct dma_resv *robj)
{
	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
	size_t acc_size;
	int ret;

	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, domain, 0);
	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
			  &nvbo->placement, align >> PAGE_SHIFT, false,
			  acc_size, sg, robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	return 0;
}

int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct dma_resv *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
	if (ret)
		return ret;

	*pnvbo = nvbo;
	return 0;
}
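
/*
 * nouveau_bo_new() is the all-in-one path.  The alloc/init split exists so
 * that callers such as nouveau_gem_new() can initialize the embedded GEM
 * object after sizing the buffer but before TTM takes over; buffers created
 * directly through nouveau_bo_new() never get a GEM object (see
 * nouveau_bo_del_ttm()).
 */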

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
{
	*n = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
		pl[*n].mem_type = TTM_PL_VRAM;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
		pl[*n].mem_type = TTM_PL_TT;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
		pl[*n].mem_type = TTM_PL_SYSTEM;
		pl[*n].flags = 0;
		(*n)++;
	}
}
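
/*
 * TTM walks a placement list in array order, so the list built above
 * encodes preference: NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART
 * yields { TTM_PL_VRAM, TTM_PL_TT }, i.e. "VRAM if possible, GART
 * otherwise".
 */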

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}
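
/*
 * In effect, small tiled buffers on CELSIUS boards are steered into halves
 * of VRAM: depth (zeta) buffers into the upper half, colour buffers into
 * the lower half, which places them on different memory controller units
 * as the comment above explains.
 */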

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
			 uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement, domain);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   domain | busy);

	set_placement_range(nvbo, domain);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->bo.pin_count) {
		bool error = evict;

		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
			break;
		case TTM_PL_TT:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
			break;
		default:
			break;
		}

		if (error) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 bo->mem.mem_type, domain);
			ret = -EBUSY;
		}
		ttm_bo_pin(&nvbo->bo);
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nouveau_bo_placement_set(nvbo, domain, 0);
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;

	ttm_bo_pin(&nvbo->bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->base.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->base.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}
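
/*
 * Usage sketch (not taken verbatim from any caller): a scanout buffer is
 * typically pinned into VRAM for the lifetime of its framebuffer:
 *
 *	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 *	if (ret == 0) {
 *		... scan out ...
 *		nouveau_bo_unpin(nvbo);
 *	}
 *
 * Re-pinning an already-pinned bo just bumps pin_count; requesting a
 * domain other than the one it currently resides in fails with -EBUSY.
 */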

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ttm_bo_unpin(&nvbo->bo);
	if (!nvbo->bo.pin_count) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->base.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->base.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return 0;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   num_pages * PAGE_SIZE, DMA_TO_DEVICE);
		i += num_pages;
	}
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}

		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
		i += num_pages;
	}
}
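
/*
 * Both sync helpers above batch physically contiguous pages into a single
 * dma_sync_single_for_*() call: the inner loop extends the run for as long
 * as the next struct page directly follows the previous one, so a fully
 * contiguous allocation is synced with one call rather than one per page.
 */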

void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
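
/*
 * Note the accessors above index by element, not by byte:
 * nouveau_bo_wr32(nvbo, 2, v) writes bytes 8..11 of the kmap'd buffer.
 * The is_iomem branch is needed because the mapping may be an ioremap of
 * VRAM rather than ordinary kernel memory.
 */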

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
	}
#endif

	return nouveau_sgdma_create_ttm(bo, page_flags);
}

static int
nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
		    struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
#endif
	if (!reg)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge)
		return ttm_agp_bind(ttm, reg);
#endif
	return nouveau_sgdma_bind(bdev, ttm, reg);
}

static void
nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	nouveau_sgdma_unbind(bdev, ttm);
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
					 NOUVEAU_GEM_DOMAIN_CPU);
		break;
	default:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
		break;
	}

	*pl = nvbo->placement;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_resource *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	return ret;
}
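
/*
 * On failure every VMA acquired above has been released again, so callers
 * can simply fall back to a CPU copy.  On success both temporary VMAs stay
 * attached to the old node and are torn down with it once TTM destroys the
 * old ttm_resource (see the comment in nouveau_bo_move_m2mf() below).
 */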

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
		     struct ttm_operation_ctx *ctx,
		     struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_resource
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	if (drm_drv_uses_atomic_modeset(drm->dev))
		mutex_lock(&cli->mutex);
	else
		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict, false,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct _method_table {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
	};
	const struct _method_table *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_dtor(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
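
/*
 * The method table above is ordered newest class first and the probe loop
 * takes the first one that can be instantiated on the copy (or graphics)
 * channel: a Turing board ends up on 0xc5b5 while NV04 falls through to
 * 0x0039 M2MF.  If nothing matches, "CPU" is reported and buffer moves fall
 * back to memcpy.
 */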

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_resource *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	nouveau_bo_del_io_reserve_lru(bo);

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}

	if (new_reg) {
		if (new_reg->mm_node)
			nvbo->offset = (new_reg->start << PAGE_SHIFT);
		else
			nvbo->offset = 0;
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg,
		struct ttm_place *hop)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (new_reg->mem_type == TTM_PL_TT) {
		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
		if (ret)
			return ret;
	}

	nouveau_bo_move_ntfy(bo, evict, new_reg);
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out_ntfy;

	if (nvbo->bo.pin_count)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			goto out_ntfy;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_SYSTEM &&
	    new_reg->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_TT &&
	    new_reg->mem_type == TTM_PL_SYSTEM) {
		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->mem);
		ttm_bo_assign_mem(bo, new_reg);
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if ((old_reg->mem_type == TTM_PL_SYSTEM &&
		     new_reg->mem_type == TTM_PL_VRAM) ||
		    (old_reg->mem_type == TTM_PL_VRAM &&
		     new_reg->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}
		ret = nouveau_bo_move_m2mf(bo, evict, ctx,
					   new_reg);
	} else
		ret = -ENODEV;

	if (ret) {
		/* Fallback to software copy. */
		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
	}

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}
out_ntfy:
	if (ret) {
		swap(*new_reg, bo->mem);
		nouveau_bo_move_ntfy(bo, false, new_reg);
		swap(*new_reg, bo->mem);
	}
	return ret;
}
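
/*
 * The copy engines can't transfer directly between SYSTEM and VRAM here, so
 * nouveau_bo_move() returns -EMULTIHOP with a TTM_PL_TT hop for those
 * cases; the TTM core then splits the move into SYSTEM <-> TT <-> VRAM and
 * calls back into this function once per leg.
 */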

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
					  filp->private_data);
}

static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nvif_mmu *mmu = &drm->client.mmu;
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
			reg->bus.caching = ttm_write_combined;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;

		/* Some BARs do not support being ioremapped WC */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
		    mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
			reg->bus.caching = ttm_uncached;
		else
			reg->bus.caching = ttm_write_combined;

		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
			goto retry;
		}
	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}
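
/*
 * The -ENOSPC handling above is effectively BAR eviction: when the
 * mappable (BAR1) aperture is exhausted, the least-recently-used bo on
 * io_reserve_lru loses its CPU mappings and its I/O reservation, and the
 * reservation is retried until it succeeds or the LRU runs empty.
 */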

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type != TTM_PL_SYSTEM)
			return 0;

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);

	} else {
		/* make sure bo is in mappable vram */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
		    bo->mem.start + bo->mem.num_pages < mappable)
			return 0;

		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = 0;
			nvbo->placements[i].lpfn = mappable;
		}

		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = 0;
			nvbo->busy_placements[i].lpfn = mappable;
		}

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

static int
nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
					       ttm->num_pages);
		return 0;
	}

	drm = nouveau_bdev(bdev);

	return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
			  struct ttm_tt *ttm)
{
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(bdev);

	return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}

static void
nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	nouveau_sgdma_destroy(bdev, ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (exclusive)
		dma_resv_add_excl_fence(resv, &fence->base);
	else if (fence)
		dma_resv_add_shared_fence(resv, &fence->base);
}

static void
nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	nouveau_bo_move_ntfy(bo, false, NULL);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.delete_mem_notify = nouveau_bo_delete_mem_notify,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};