/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor. Choose SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);

	/* enable virtual display */
	if (adev->asic_type != CHIP_ALDEBARAN &&
	    adev->asic_type != CHIP_ARCTURUS) {
		if (adev->mode_info.num_crtc == 0)
			adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
	}
	ddev->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 *
 * When the driver starts init/fini, it first needs to request full GPU
 * access.
 * Return: Zero if the request succeeds, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 *
 * When driver init/fini finishes, full GPU access needs to be released.
 * Return: Zero if the release succeeds, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
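
/*
 * Illustrative pairing of the two helpers above (a sketch of the typical
 * call pattern, not code taken from the driver's init path; the function
 * name my_vf_init_example is hypothetical):
 *
 *	static int my_vf_init_example(struct amdgpu_device *adev)
 *	{
 *		int r;
 *
 *		r = amdgpu_virt_request_full_gpu(adev, true);
 *		if (r)
 *			return r;
 *
 *		// ... programming that needs exclusive access goes here ...
 *
 *		return amdgpu_virt_release_full_gpu(adev, true);
 *	}
 */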

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero if the reset succeeds, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 *
 * Wait for the GPU reset to complete.
 * Return: Zero if the reset completed, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}
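
/*
 * Illustrative sequence for a VF-initiated reset (a sketch combining the
 * two helpers above; the function name my_vf_reset_example is hypothetical
 * and error handling is trimmed for brevity):
 *
 *	static int my_vf_reset_example(struct amdgpu_device *adev)
 *	{
 *		int r = amdgpu_virt_reset_gpu(adev);
 *
 *		if (r)
 *			return r;
 *		return amdgpu_virt_wait_reset(adev);
 *	}
 */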

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 *
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero if the allocation succeeds, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 *
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
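
/*
 * Illustrative lifecycle of the MM table (a sketch of the expected usage,
 * not code lifted from the UVD/VCE IP blocks):
 *
 *	// at sw_init time, before the engine consumes the table
 *	r = amdgpu_virt_alloc_mm_table(adev);
 *
 *	// ... UVD/VCE read adev->virt.mm_table.{gpu_addr,cpu_addr} ...
 *
 *	// at sw_fini time
 *	amdgpu_virt_free_mm_table(adev);
 */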

unsigned int amd_sriov_msg_checksum(void *obj,
				    unsigned long obj_size,
				    unsigned int key,
				    unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}
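
/*
 * Illustrative verification with amd_sriov_msg_checksum() (a sketch
 * mirroring the pf2vf read path below; msg, msg_size, key and
 * expected_checksum are hypothetical locals):
 *
 *	unsigned int checkval;
 *
 *	// the stored checksum's own bytes are subtracted back out, so the
 *	// message can be summed in place and compared with the stored value
 *	checkval = amd_sriov_msg_checksum(msg, msg_size, key,
 *					  expected_checksum);
 *	if (checkval != expected_checksum)
 *		return -EINVAL;	// message corrupted or truncated
 */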

static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* GPU will be marked bad on the host if the bad page count exceeds 10,
	 * so allocating 512 entries is enough.
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		goto data_failure;

	bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
	if (!bps)
		goto bps_failure;

	bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
	if (!bps_bo)
		goto bps_bo_failure;

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;

	virt->ras_init_done = true;

	return 0;

bps_bo_failure:
	kfree(bps);
bps_failure:
	kfree(*data);
data_failure:
	return -ENOMEM;
}

static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		amdgpu_bo_free_kernel(&bo, NULL, NULL);
		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}

static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}

static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* Two reservation failure cases can be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: failed to reserve vram for retired page %llx\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}
}

static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}

static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
		uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}
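
/*
 * Illustrative layout of the bad-page block parsed above (inferred from the
 * parsing code, not a structure defined by the SR-IOV message headers): the
 * block at fw_vram_usage_va + bp_block_offset is a packed array of 64-bit
 * retired page numbers,
 *
 *	uint64_t retired_page[bp_block_size / sizeof(uint64_t)];
 *
 * each entry being a GPU page index that is shifted by AMDGPU_GPU_PAGE_SHIFT
 * when the page is reserved.
 */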

static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;
	uint32_t i;
	uint32_t tmp;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		DRM_ERROR("invalid pf2vf message size\n");
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		/* TODO: missing key, need to add it later */
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
		adev->virt.reg_access =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

		adev->virt.decode_max_dimension_pixels = 0;
		adev->virt.decode_max_frame_pixels = 0;
		adev->virt.encode_max_dimension_pixels = 0;
		adev->virt.encode_max_frame_pixels = 0;
		adev->virt.is_mm_bw_enabled = false;
		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
		}
		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
			adev->virt.is_mm_bw_enabled = true;

		adev->unique_id =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
		break;
	default:
		DRM_ERROR("invalid pf2vf version\n");
		return -EINVAL;
	}

	/* clamp intervals that are too large or too small */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}

static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info =
		(struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
			    adev->psp.ras_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
			    adev->psp.xgmi_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
}

static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strcpy(vf2pf_info->driver_version, "N/A");

	vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	vf2pf_info->fb_usage =
		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
	vf2pf_info->fb_vis_usage =
		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
		vf2pf_info, vf2pf_info->header.size, 0, 0);

	return 0;
}

static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret)
		goto out;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	/* the interval is in milliseconds; convert to jiffies as on the init path */
	schedule_delayed_work(&(adev->virt.vf2pf_work),
			      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
		adev->virt.vf2pf_update_interval_ms = 0;
	}
}

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;

	if (adev->mman.fw_vram_usage_va != NULL) {
		/* go through this path in ip_init and reset to start the vf2pf workqueue */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* go through this path in early init to get necessary flags, e.g. rlcg_acc related */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}

void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va != NULL) {

		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
		adev->virt.fw_reserve.p_vf2pf =
			(struct amd_sriov_msg_vf2pf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
					((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}

void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine())	/* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	/* we have the ability to check now */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
			soc15_set_virt_ops(adev);
			/* send a dummy GPU_INIT_DATA request to host on vega10 */
			amdgpu_virt_request_init_data(adev);
			break;
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
			nv_set_virt_ops(adev);
			/* try to send GPU_INIT_DATA request to host */
			amdgpu_virt_request_init_data(adev);
			break;
		default: /* other chips don't support SRIOV */
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}

static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev);
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev);
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}
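
/*
 * Illustrative consumer of the mode query above (a sketch, not driver code;
 * the switch labels are the real enum values, the handling is hypothetical):
 *
 *	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
 *	case SRIOV_VF_MODE_BARE_METAL:
 *		// full hardware control, no hypervisor mediation
 *		break;
 *	case SRIOV_VF_MODE_ONE_VF:
 *		// a single VF owns the GPU
 *		break;
 *	case SRIOV_VF_MODE_MULTI_VF:
 *		// GPU is shared between VFs; restrict privileged paths
 *		break;
 *	}
 */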

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
	uint32_t i;

	if (!adev->virt.is_mm_bw_enabled)
		return;

	if (encode) {
		for (i = 0; i < encode_array_size; i++) {
			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
			if (encode[i].max_width > 0)
				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
			else
				encode[i].max_height = 0;
		}
	}

	if (decode) {
		for (i = 0; i < decode_array_size; i++) {
			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
			if (decode[i].max_width > 0)
				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
			else
				decode[i].max_height = 0;
		}
	}
}
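
/*
 * Illustrative use of the clamp helper above (a sketch of how an ASIC's
 * video-codec query path might apply the host-provided limits; the array
 * name my_codecs_example and its contents are hypothetical):
 *
 *	struct amdgpu_video_codec_info my_codecs_example[2] = { ... };
 *
 *	amdgpu_virt_update_sriov_video_codec(adev,
 *					     my_codecs_example,
 *					     ARRAY_SIZE(my_codecs_example),
 *					     NULL, 0);
 */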

static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
						 u32 acc_flags, u32 hwip,
						 bool write, u32 *rlcg_flag)
{
	bool ret = false;

	switch (hwip) {
	case GC_HWIP:
		if (amdgpu_sriov_reg_indirect_gc(adev)) {
			*rlcg_flag =
				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
			ret = true;
		/* only in newer versions are AMDGPU_REGS_NO_KIQ and
		 * AMDGPU_REGS_RLC enabled simultaneously
		 */
		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
			   !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
			ret = true;
		}
		break;
	case MMHUB_HWIP:
		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
		    (acc_flags & AMDGPU_REGS_RLC) && write) {
			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
			ret = true;
		}
		break;
	default:
		break;
	}
	return ret;
}

static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
	uint32_t timeout = 50000;
	uint32_t i, tmp;
	uint32_t ret = 0;
	static void *scratch_reg0;
	static void *scratch_reg1;
	static void *scratch_reg2;
	static void *scratch_reg3;
	static void *spare_int;

	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
		dev_err(adev->dev,
			"indirect register access through rlcg is not available\n");
		return 0;
	}

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

	if (offset == reg_access_ctrl->grbm_cntl) {
		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
		writel(v, scratch_reg2);
		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else if (offset == reg_access_ctrl->grbm_idx) {
		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
		writel(v, scratch_reg3);
		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		/*
		 * SCRATCH_REG0		= read/write value
		 * SCRATCH_REG1[30:28]	= command
		 * SCRATCH_REG1[19:0]	= address in dword
		 * SCRATCH_REG1[26:24]	= Error reporting
		 */
		writel(v, scratch_reg0);
		writel((offset | flag), scratch_reg1);
		if (reg_access_ctrl->spare_int)
			writel(1, spare_int);

		for (i = 0; i < timeout; i++) {
			tmp = readl(scratch_reg1);
			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
				break;
			udelay(10);
		}

		if (i >= timeout) {
			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
					dev_err(adev->dev,
						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
					dev_err(adev->dev,
						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
					dev_err(adev->dev,
						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
				} else {
					dev_err(adev->dev,
						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
				}
			} else {
				dev_err(adev->dev,
					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
			}
		}
	}

	ret = readl(scratch_reg0);
	return ret;
}

void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, value);
	else
		WREG32(offset, value);
}

u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		return RREG32_NO_KIQ(offset);
	else
		return RREG32(offset);
}