/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"

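/**
 * amdgpu_virt_mmio_blocked() - check whether MMIO access is blocked
 * @adev: amdgpu device.
 * Probe SCRATCH_REG0 with a non-KIQ read; a blocked page reads back as
 * all ones.
 * Return: True if the hypervisor is blocking MMIO access, false otherwise.
 */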
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor, so use SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

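/**
 * amdgpu_virt_init_setting() - adjust settings for a virtual function
 * @adev: amdgpu device.
 * Enable virtual display, drop the DRIVER_ATOMIC feature flag and zero the
 * clockgating/powergating flags for SR-IOV virtual functions.
 */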
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	if (adev->mode_info.num_crtc == 0)
		adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

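/**
 * amdgpu_virt_kiq_reg_write_reg_wait() - write one register and poll another via KIQ
 * @adev: amdgpu device.
 * @reg0: register to write.
 * @reg1: register to poll.
 * @ref: reference value to wait for.
 * @mask: mask used when polling @reg1.
 * Emit a reg_write_reg_wait packet on the KIQ ring and poll its fence;
 * outside IRQ context the wait is retried up to MAX_KIQ_REG_TRY times.
 */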
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero if the request succeeds, otherwise an error.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When finishing driver init/fini, full gpu access needs to be released.
 * Return: Zero if the release succeeds, otherwise an error.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
 * Return: Zero if the reset succeeds, otherwise an error.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

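/**
 * amdgpu_virt_request_init_data() - request init data from the host
 * @adev: amdgpu device.
 * Ask the hypervisor for the init data block via the REQ_INIT_DATA
 * handshake and log whether the host supports it.
 */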
void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev: amdgpu device.
 * Wait until the GPU reset has completed.
 * Return: Zero if the reset completed, otherwise an error.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero if the allocation succeeds, otherwise an error.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

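/**
 * amdgpu_virt_fw_reserve_get_checksum() - compute checksum of a pf2vf/vf2pf block
 * @obj: start of the data block.
 * @obj_size: size of the data block in bytes.
 * @key: checksum key used to seed the sum.
 * @chksum: checksum value already stored in the block, subtracted from the sum.
 * Return: Byte-wise sum of the block, seeded with @key and excluding @chksum.
 */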
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}

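/**
 * amdgpu_virt_init_data_exchange() - set up the pf2vf/vf2pf exchange area
 * @adev: amdgpu device.
 * Locate the pf2vf block in the reserved firmware VRAM, validate its
 * checksum and, if it is valid, initialize the vf2pf block that follows it
 * with the driver version and a fresh checksum.
 */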
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* the pf2vf message must fit within 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}

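/**
 * amdgpu_detect_virtualization() - detect SR-IOV/passthrough status
 * @adev: amdgpu device.
 * Read the ASIC's IOV function identifier register and set the
 * IS_VF/ENABLE_IOV capability bits accordingly; if the register reads
 * zero while running inside a VM, passthrough mode is assumed.
 */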
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine())	/* passthrough mode excludes SR-IOV mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

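/*
 * Helpers deciding how debugfs may touch registers under SR-IOV: direct
 * MMIO access is only allowed in debug mode, KIQ-based access only in
 * normal run mode.
 */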
bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev) ? true : false;
}

bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev) ? true : false;
}

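/**
 * amdgpu_virt_enable_access_debugfs() - allow debugfs register access on a VF
 * @adev: amdgpu device.
 * Return: Zero if access is allowed (clearing the RUNTIME capability when
 * direct MMIO is used), -EPERM otherwise.
 */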
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

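/**
 * amdgpu_virt_get_sriov_vf_mode() - report the SR-IOV mode the device runs in
 * @adev: amdgpu device.
 * Return: SRIOV_VF_MODE_ONE_VF or SRIOV_VF_MODE_MULTI_VF for a virtual
 * function, SRIOV_VF_MODE_BARE_METAL otherwise.
 */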
enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}