drm/amdgpu/virt: use kiq to access registers (v2)
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}
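
/*
 * Illustrative sketch (not part of this file): how an IP block's sw_init
 * might allocate the static CSA when running as an SR-IOV VF. The call
 * site and function name below are assumptions for illustration; only
 * amdgpu_sriov_vf() and amdgpu_allocate_static_csa() come from the driver.
 */
static int example_sw_init_csa(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* one VRAM-backed, zero-filled CSA shared across VMs */
		r = amdgpu_allocate_static_csa(adev);
		if (r)
			return r;
	}
	return 0;
}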

/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init.
 * It maps the CSA at virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and every GFX command submission should reference that
 * virtual address in its META_DATA init package to support SR-IOV gfx
 * preemption.
 */

int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	int r;
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		kfree(bo_va);
		return r;
	}

	vm->csa_bo_va = bo_va;
	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
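
/*
 * Illustrative sketch (not part of this file): per-VM CSA mapping as it
 * might be invoked from amdgpu_vm_init() for an SR-IOV VF. The wrapper
 * name is an assumption; only amdgpu_sriov_vf() and amdgpu_map_static_csa()
 * come from the driver.
 */
static int example_vm_init_csa(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm)
{
	/* bare-metal VMs don't need the CSA mapping */
	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* map the shared CSA at AMDGPU_CSA_VADDR so GFX submissions can
	 * reference it from their META_DATA init packages */
	return amdgpu_map_static_csa(adev, vm);
}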

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	mutex_init(&adev->virt.lock);
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	uint32_t val;
	struct dma_fence *f;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	/* serialize KIQ ring submissions for this VF */
	mutex_lock(&adev->virt.lock);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_hdp_flush(ring);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_ring_emit_hdp_invalidate(ring);
	amdgpu_fence_emit(ring, &f);
	amdgpu_ring_commit(ring);
	mutex_unlock(&adev->virt.lock);

	/* wait for the KIQ to execute the read before fetching the result */
	r = dma_fence_wait(f, false);
	if (r)
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
	dma_fence_put(f);

	/* the KIQ writes the register value back to this writeback slot */
	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	struct dma_fence *f;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	/* serialize KIQ ring submissions for this VF */
	mutex_lock(&adev->virt.lock);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_hdp_flush(ring);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_ring_emit_hdp_invalidate(ring);
	amdgpu_fence_emit(ring, &f);
	amdgpu_ring_commit(ring);
	mutex_unlock(&adev->virt.lock);

	/* block until the KIQ has actually performed the write */
	r = dma_fence_wait(f, false);
	if (r)
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
	dma_fence_put(f);
}
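
/*
 * Illustrative sketch (not part of this file): how an MMIO accessor might
 * route register access through the KIQ once the VF is in SR-IOV runtime
 * mode and has lost direct MMIO access. The wrapper below is an assumption
 * for illustration; only amdgpu_sriov_runtime(), amdgpu_virt_kiq_rreg()
 * and adev->rmmio come from the driver.
 */
static uint32_t example_mm_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	if (amdgpu_sriov_runtime(adev))
		/* no direct MMIO: ask the KIQ to perform the read */
		return amdgpu_virt_kiq_rreg(adev, reg);

	return readl(((void __iomem *)adev->rmmio) + (reg * 4));
}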