/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vegam_sdma.bin");
MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin");


static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
        SDMA0_REGISTER_OFFSET,
        SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_tonga_a11[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
        mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
        mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_fiji_a10[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
        mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
        mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_polaris11_a11[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 golden_settings_polaris10_a11[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 cz_golden_settings_a11[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
        mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
        mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 cz_mgcg_cgcg_init[] =
{
        mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
        mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 stoney_golden_settings_a11[] =
{
        mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
        mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
        mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};
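
/*
 * Each entry in the golden-settings and cgcg tables above is a
 * (register, and_mask, or_mask) triple consumed by
 * amdgpu_device_program_register_sequence().  Roughly, it is applied
 * as a read-modify-write (illustrative sketch, not the exact helper):
 *
 *      tmp = RREG32(reg);
 *      tmp &= ~and_mask;
 *      tmp |= or_mask;
 *      WREG32(reg, tmp);
 */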

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
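
/*
 * Illustrative example of the packet format (not part of the driver):
 * every sDMA packet begins with a header dword whose opcode/sub-opcode
 * fields are built with the macros from tonga_sdma_pkt_open.h, e.g.
 *
 *      amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
 *
 * exactly as the various emit functions below do.
 */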

static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_FIJI:
                amdgpu_device_program_register_sequence(adev,
                                                        fiji_mgcg_cgcg_init,
                                                        ARRAY_SIZE(fiji_mgcg_cgcg_init));
                amdgpu_device_program_register_sequence(adev,
                                                        golden_settings_fiji_a10,
                                                        ARRAY_SIZE(golden_settings_fiji_a10));
                break;
        case CHIP_TONGA:
                amdgpu_device_program_register_sequence(adev,
                                                        tonga_mgcg_cgcg_init,
                                                        ARRAY_SIZE(tonga_mgcg_cgcg_init));
                amdgpu_device_program_register_sequence(adev,
                                                        golden_settings_tonga_a11,
                                                        ARRAY_SIZE(golden_settings_tonga_a11));
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
                amdgpu_device_program_register_sequence(adev,
                                                        golden_settings_polaris11_a11,
                                                        ARRAY_SIZE(golden_settings_polaris11_a11));
                break;
        case CHIP_POLARIS10:
                amdgpu_device_program_register_sequence(adev,
                                                        golden_settings_polaris10_a11,
                                                        ARRAY_SIZE(golden_settings_polaris10_a11));
                break;
        case CHIP_CARRIZO:
                amdgpu_device_program_register_sequence(adev,
                                                        cz_mgcg_cgcg_init,
                                                        ARRAY_SIZE(cz_mgcg_cgcg_init));
                amdgpu_device_program_register_sequence(adev,
                                                        cz_golden_settings_a11,
                                                        ARRAY_SIZE(cz_golden_settings_a11));
                break;
        case CHIP_STONEY:
                amdgpu_device_program_register_sequence(adev,
                                                        stoney_mgcg_cgcg_init,
                                                        ARRAY_SIZE(stoney_mgcg_cgcg_init));
                amdgpu_device_program_register_sequence(adev,
                                                        stoney_golden_settings_a11,
                                                        ARRAY_SIZE(stoney_golden_settings_a11));
                break;
        default:
                break;
        }
}

static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                release_firmware(adev->sdma.instance[i].fw);
                adev->sdma.instance[i].fw = NULL;
        }
}

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err = 0, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_TONGA:
                chip_name = "tonga";
                break;
        case CHIP_FIJI:
                chip_name = "fiji";
                break;
        case CHIP_POLARIS10:
                chip_name = "polaris10";
                break;
        case CHIP_POLARIS11:
                chip_name = "polaris11";
                break;
        case CHIP_POLARIS12:
                chip_name = "polaris12";
                break;
        case CHIP_VEGAM:
                chip_name = "vegam";
                break;
        case CHIP_CARRIZO:
                chip_name = "carrizo";
                break;
        case CHIP_STONEY:
                chip_name = "stoney";
                break;
        default: BUG();
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
                err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
                err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
                if (err)
                        goto out;
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
                adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
                adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
                if (adev->sdma.instance[i].feature_version >= 20)
                        adev->sdma.instance[i].burst_nop = true;

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
                info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
                info->fw = adev->sdma.instance[i].fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
        }
out:
        if (err) {
                pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        release_firmware(adev->sdma.instance[i].fw);
                        adev->sdma.instance[i].fw = NULL;
                }
        }
        return err;
}
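
/*
 * For reference, the naming scheme above resolves to per-instance files
 * such as "amdgpu/tonga_sdma.bin" (instance 0) and
 * "amdgpu/tonga_sdma1.bin" (instance 1), matching the MODULE_FIRMWARE()
 * declarations at the top of this file.
 */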

/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        /* XXX check if swapping is necessary on BE */
        return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}

/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 wptr;

        if (ring->use_doorbell || ring->use_pollmem) {
                /* XXX check if swapping is necessary on BE */
                wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
        } else {
                wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
        }

        return wptr;
}

/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell) {
                u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
                /* XXX check if swapping is necessary on BE */
                WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
                WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
        } else if (ring->use_pollmem) {
                u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];

                WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
        } else {
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
        }
}
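
/*
 * Note on the << 2 / >> 2 shifts above: the hardware ring pointers are
 * byte offsets, while the driver tracks rptr/wptr in dwords, so e.g. a
 * driver wptr of 16 (dwords) is committed to the register, doorbell or
 * polled memory as the byte offset 64 (16 << 2).
 */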

static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;

        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        amdgpu_ring_write(ring, ring->funcs->nop |
                                SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->funcs->nop);
}
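
/*
 * Illustration of the burst NOP above: for count == 4 on firmware with
 * burst_nop support, the first NOP header carries COUNT(3), telling the
 * engine to consume the three plain NOP dwords that follow as a single
 * packet:
 *
 *      NOP|COUNT(3), NOP, NOP, NOP
 */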

/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
                                   uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        /* IB packet must end on an 8 DW boundary */
        sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
}
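
/*
 * Alignment arithmetic for the NOP padding above: (2 - wptr) & 7 pads
 * the ring so that wptr % 8 == 2; the INDIRECT packet then occupies
 * exactly six dwords (header, base lo/hi, length, and two trailing
 * zero dwords), so it ends on an 8 DW boundary (2 + 6 == 8).
 */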

/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        u32 ref_and_mask = 0;

        if (ring->me == 0)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
        amdgpu_ring_write(ring, ref_and_mask); /* reference */
        amdgpu_ring_write(ring, ref_and_mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
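
/*
 * With the HDP_FLUSH flag set, the POLL_REGMEM packet above has the
 * engine kick off a flush via GPU_HDP_FLUSH_REQ and then poll
 * GPU_HDP_FLUSH_DONE until (value & mask) == reference, i.e. until this
 * SDMA instance's flush-done bit is set, using the retry count and poll
 * interval encoded in the final dword.
 */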

/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence value is written to
 * @seq: fence sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                      unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        /* generate an interrupt */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
        amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;

        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_buffer_funcs_status(adev, false);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
}

/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}

/**
 * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl, phase_quantum = 0;
        int i;

        if (amdgpu_sdma_phase_quantum) {
                unsigned value = amdgpu_sdma_phase_quantum;
                unsigned unit = 0;

                while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
                                SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
                        value = (value + 1) >> 1;
                        unit++;
                }
                if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
                            SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
                        value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
                                 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
                        unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
                                SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
                        WARN_ONCE(1,
                                  "clamping sdma_phase_quantum to %uK clock cycles\n",
                                  value << unit);
                }
                phase_quantum =
                        value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
                        unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
                if (enable) {
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                                 AUTO_CTXSW_ENABLE, 1);
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                                 ATC_L1_ENABLE, 1);
                        if (amdgpu_sdma_phase_quantum) {
                                WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
                                       phase_quantum);
                                WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
                                       phase_quantum);
                        }
                } else {
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                                 AUTO_CTXSW_ENABLE, 0);
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                                 ATC_L1_ENABLE, 1);
                }

                WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
        }
}
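
/*
 * Worked example for the value/unit encoding above (assuming, purely
 * for illustration, an 8-bit VALUE field with a maximum of 255): a
 * requested amdgpu_sdma_phase_quantum of 1000 exceeds 255, so the loop
 * halves it (with rounding up) to 500 and then 250 while bumping unit
 * to 2, encoding a quantum of roughly 250 << 2 = 1000 in the
 * register's value/unit format.
 */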

/**
 * sdma_v3_0_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!enable) {
                sdma_v3_0_gfx_stop(adev);
                sdma_v3_0_rlc_stop(adev);
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
                else
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
        }
}

/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl, wptr_poll_cntl;
        u32 rb_bufsz;
        u32 wb_offset;
        u32 doorbell;
        u64 wptr_gpu_addr;
        int i, j, r;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                amdgpu_ring_clear_ring(ring);
                wb_offset = (ring->rptr_offs * 4);

                mutex_lock(&adev->srbm_mutex);
                for (j = 0; j < 16; j++) {
                        vi_srbm_select(adev, 0, 0, 0, j);
                        /* SDMA GFX */
                        WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
                        WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
                }
                vi_srbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);

                WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
                       adev->gfx.config.gb_addr_config & 0x70);

                WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                        RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                ring->wptr = 0;
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                sdma_v3_0_ring_set_wptr(ring);
                WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
                       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
                       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

                WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
                WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

                doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

                if (ring->use_doorbell) {
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
                                                 OFFSET, ring->doorbell_index);
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
                } else {
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
                }
                WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

                /* setup the wptr shadow polling */
                wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);

                WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
                       lower_32_bits(wptr_gpu_addr));
                WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
                       upper_32_bits(wptr_gpu_addr));
                wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
                if (ring->use_pollmem) {
                        /* wptr polling is not fast enough, so directly clear the wptr register */
                        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
                        wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
                                                       SDMA0_GFX_RB_WPTR_POLL_CNTL,
                                                       ENABLE, 1);
                } else {
                        wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
                                                       SDMA0_GFX_RB_WPTR_POLL_CNTL,
                                                       ENABLE, 0);
                }
                WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);

                /* enable DMA RB */
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

                ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

                ring->sched.ready = true;
        }

        /* unhalt the MEs */
        sdma_v3_0_enable(adev, true);
        /* enable sdma ring preemption */
        sdma_v3_0_ctx_switch_enable(adev, true);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }

        return 0;
}
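
/*
 * Sizing note for gfx_resume() above: RB_SIZE is programmed as
 * log2(ring size in dwords), e.g. a 4096-byte ring is 1024 dwords and
 * is encoded as rb_bufsz = order_base_2(4096 / 4) = 10.
 */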

/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
        /* XXX todo */
        return 0;
}

/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
        int r;

        /* disable sdma engine before programming it */
        sdma_v3_0_ctx_switch_enable(adev, false);
        sdma_v3_0_enable(adev, false);

        /* start the gfx rings and rlc compute queues */
        r = sdma_v3_0_gfx_resume(adev);
        if (r)
                return r;
        r = sdma_v3_0_rlc_resume(adev);
        if (r)
                return r;

        return 0;
}

/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ring_alloc(ring, 5);
        if (r)
                goto error_free_wb;

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

error_free_wb:
        amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        unsigned index;
        u32 tmp = 0;
        u64 gpu_addr;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256,
                          AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;

        ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
        ib.ptr[4] = 0xDEADBEEF;
        ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.length_dw = 8;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;
err1:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err0:
        amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
                                  uint64_t pe, uint64_t src,
                                  unsigned count)
{
        unsigned bytes = count * 8;

        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
        ib->ptr[ib->length_dw++] = bytes;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
        ib->ptr[ib->length_dw++] = upper_32_bits(src);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
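
/*
 * Sizing note for the copy above: each GPU page-table entry is 8 bytes,
 * so copying 'count' PTEs is a linear copy of count * 8 bytes from the
 * GART source to the page-table destination.
 */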

/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: value (address and flags) to write into the pte
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
                                   uint64_t value, unsigned count,
                                   uint32_t incr)
{
        unsigned ndw = count * 2;

        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw;
        for (; ndw > 0; ndw -= 2) {
                ib->ptr[ib->length_dw++] = lower_32_bits(value);
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                value += incr;
        }
}

/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
                                     uint64_t addr, unsigned count,
                                     uint32_t incr, uint64_t flags)
{
        /* for physically contiguous pages (vram) */
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
        ib->ptr[ib->length_dw++] = upper_32_bits(flags);
        ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = incr; /* increment size */
        ib->ptr[ib->length_dw++] = 0;
        ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 */
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        u32 pad_count;
        int i;

        pad_count = (-ib->length_dw) & 7;
        for (i = 0; i < pad_count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
                                SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
                else
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
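
/*
 * Padding arithmetic for the function above: (-length_dw) & 7 is the
 * number of dwords needed to round length_dw up to a multiple of 8,
 * e.g. an IB of 13 dwords gets (-13) & 7 == 3 NOP dwords of padding
 * (13 + 3 == 16).
 */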

/**
 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VI).
 */
static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        /* wait for idle */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
                          SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq); /* reference */
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, uint64_t pd_addr)
{
        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0); /* reference */
        amdgpu_ring_write(ring, 0); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
                                     uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, val);
}
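
/*
 * The SRBM_WRITE packet above performs a register write from the ring
 * itself (BYTE_EN 0xf enables all four byte lanes); amdgpu uses this
 * hook via ring->funcs->emit_wreg, e.g. when flushing VM TLBs from the
 * ring as in sdma_v3_0_ring_emit_vm_flush() above.
 */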

static int sdma_v3_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (adev->asic_type) {
        case CHIP_STONEY:
                adev->sdma.num_instances = 1;
                break;
        default:
                adev->sdma.num_instances = SDMA_MAX_INSTANCE;
                break;
        }

        sdma_v3_0_set_ring_funcs(adev);
        sdma_v3_0_set_buffer_funcs(adev);
        sdma_v3_0_set_vm_pte_funcs(adev);
        sdma_v3_0_set_irq_funcs(adev);

        return 0;
}

static int sdma_v3_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* SDMA trap event */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
                              &adev->sdma.trap_irq);
        if (r)
                return r;

        /* SDMA Privileged inst */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;

        /* SDMA SRBM write (also routed to the illegal-instruction handler) */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;

        r = sdma_v3_0_init_microcode(adev);
        if (r) {
                DRM_ERROR("Failed to load sdma firmware!\n");
                return r;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                ring->ring_obj = NULL;
                if (!amdgpu_sriov_vf(adev)) {
                        ring->use_doorbell = true;
                        ring->doorbell_index = adev->doorbell_index.sdma_engine[i];
                } else {
                        ring->use_pollmem = true;
                }

                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1);
                if (r)
                        return r;
        }

        return r;
}

static int sdma_v3_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);

        sdma_v3_0_free_microcode(adev);
        return 0;
}

static int sdma_v3_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v3_0_init_golden_registers(adev);

        r = sdma_v3_0_start(adev);
        if (r)
                return r;

        return r;
}

static int sdma_v3_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v3_0_ctx_switch_enable(adev, false);
        sdma_v3_0_enable(adev, false);

        return 0;
}

static int sdma_v3_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return sdma_v3_0_hw_fini(adev);
}

static int sdma_v3_0_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return sdma_v3_0_hw_init(adev);
}

static bool sdma_v3_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
                   SRBM_STATUS2__SDMA1_BUSY_MASK))
                return false;

        return true;
}

static int sdma_v3_0_wait_for_idle(void *handle)
{
        unsigned i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
                                                SRBM_STATUS2__SDMA1_BUSY_MASK);

                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

static bool sdma_v3_0_check_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
            (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
        }

        if (srbm_soft_reset) {
                adev->sdma.srbm_soft_reset = srbm_soft_reset;
                return true;
        } else {
                adev->sdma.srbm_soft_reset = 0;
                return false;
        }
}

static int sdma_v3_0_pre_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;

        if (!adev->sdma.srbm_soft_reset)
                return 0;

        srbm_soft_reset = adev->sdma.srbm_soft_reset;

        if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
            REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
                sdma_v3_0_ctx_switch_enable(adev, false);
                sdma_v3_0_enable(adev, false);
        }

        return 0;
}

static int sdma_v3_0_post_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;

        if (!adev->sdma.srbm_soft_reset)
                return 0;

        srbm_soft_reset = adev->sdma.srbm_soft_reset;

        if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
            REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
                sdma_v3_0_gfx_resume(adev);
                sdma_v3_0_rlc_resume(adev);
        }

        return 0;
}

static int sdma_v3_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
        u32 tmp;

        if (!adev->sdma.srbm_soft_reset)
                return 0;

        srbm_soft_reset = adev->sdma.srbm_soft_reset;

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        u32 sdma_cntl;

        switch (type) {
        case AMDGPU_SDMA_IRQ_INSTANCE0:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        case AMDGPU_SDMA_IRQ_INSTANCE1:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
        return 0;
}

static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        u8 instance_id, queue_id;

        instance_id = (entry->ring_id & 0x3) >> 0;
        queue_id = (entry->ring_id & 0xc) >> 2;
        DRM_DEBUG("IH: SDMA trap\n");
        switch (instance_id) {
        case 0:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma.instance[0].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        case 1:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma.instance[1].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        }
        return 0;
}
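
/*
 * ring_id decoding example for the handler above: bits [1:0] select the
 * SDMA instance and bits [3:2] select the queue, so e.g. ring_id 0x5
 * (0b0101) decodes to instance 1, queue 1 (a compute queue, currently
 * unhandled).
 */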

static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              struct amdgpu_iv_entry *entry)
{
        u8 instance_id, queue_id;

        DRM_ERROR("Illegal instruction in SDMA command stream\n");
        instance_id = (entry->ring_id & 0x3) >> 0;
        queue_id = (entry->ring_id & 0xc) >> 2;

        if (instance_id <= 1 && queue_id == 0)
                drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
        return 0;
}
1437 | ||
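/*
 * Medium grain clock gating (MGCG): clearing the SOFT_OVERRIDE bits in
 * SDMA0_CLK_CTRL hands clock control back to the gating logic, while
 * setting them forces the clocks on, effectively disabling MGCG.
 */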
ce22362b | 1438 | static void sdma_v3_0_update_sdma_medium_grain_clock_gating( |
3c997d24 EH |
1439 | struct amdgpu_device *adev, |
1440 | bool enable) | |
1441 | { | |
1442 | uint32_t temp, data; | |
ce22362b | 1443 | int i; |
3c997d24 | 1444 | |
e08d53cb | 1445 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { |
ce22362b AD |
1446 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1447 | temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]); | |
1448 | data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK | | |
1449 | SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK | | |
1450 | SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK | | |
1451 | SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK | | |
1452 | SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK | | |
1453 | SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK | | |
1454 | SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK | | |
1455 | SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK); | |
1456 | if (data != temp) | |
1457 | WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data); | |
1458 | } | |
3c997d24 | 1459 | } else { |
ce22362b AD |
1460 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1461 | temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]); | |
1462 | data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK | | |
3c997d24 EH |
1463 | SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK | |
1464 | SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK | | |
1465 | SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK | | |
1466 | SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK | | |
1467 | SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK | | |
1468 | SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK | | |
1469 | SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK; | |
1470 | ||
ce22362b AD |
1471 | if (data != temp) |
1472 | WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data); | |
1473 | } | |
3c997d24 EH |
1474 | } |
1475 | } | |
1476 | ||
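/*
 * Memory light sleep (LS): when LS is supported and requested, set
 * MEM_POWER_OVERRIDE in SDMA0_POWER_CNTL so the SDMA memories may enter
 * light sleep; otherwise clear it to keep them fully powered.
 */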
ce22362b | 1477 | static void sdma_v3_0_update_sdma_medium_grain_light_sleep( |
3c997d24 EH |
1478 | struct amdgpu_device *adev, |
1479 | bool enable) | |
1480 | { | |
1481 | uint32_t temp, data; | |
ce22362b | 1482 | int i; |
3c997d24 | 1483 | |
e08d53cb | 1484 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { |
ce22362b AD |
1485 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1486 | temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]); | |
1487 | data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; | |
3c997d24 | 1488 | |
ce22362b AD |
1489 | if (temp != data) |
1490 | WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data); | |
1491 | } | |
3c997d24 | 1492 | } else { |
ce22362b AD |
1493 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1494 | temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]); | |
1495 | data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; | |
3c997d24 | 1496 | |
ce22362b AD |
1497 | if (temp != data) |
1498 | WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data); | |
1499 | } | |
3c997d24 EH |
1500 | } |
1501 | } | |
1502 | ||
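/*
 * Clockgating entry point.  Only Fiji, Carrizo and Stoney wire up SDMA
 * MGCG/LS here; SRIOV VFs bail out early, presumably because the host
 * owns clock and power management under virtualization.
 */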
5fc3aeeb | 1503 | static int sdma_v3_0_set_clockgating_state(void *handle, |
1504 | enum amd_clockgating_state state) | |
aaa36a97 | 1505 | { |
3c997d24 EH |
1506 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1507 | ||
ce137c04 ML |
1508 | if (amdgpu_sriov_vf(adev)) |
1509 | return 0; | |
1510 | ||
3c997d24 EH |
1511 | switch (adev->asic_type) { |
1512 | case CHIP_FIJI: | |
ce22362b AD |
1513 | case CHIP_CARRIZO: |
1514 | case CHIP_STONEY: | |
1515 | sdma_v3_0_update_sdma_medium_grain_clock_gating(adev, | |
7e913664 | 1516 | state == AMD_CG_STATE_GATE); |
ce22362b | 1517 | sdma_v3_0_update_sdma_medium_grain_light_sleep(adev, |
7e913664 | 1518 | state == AMD_CG_STATE_GATE); |
3c997d24 EH |
1519 | break; |
1520 | default: | |
1521 | break; | |
1522 | } | |
aaa36a97 AD |
1523 | return 0; |
1524 | } | |
1525 | ||
5fc3aeeb | 1526 | static int sdma_v3_0_set_powergating_state(void *handle, |
1527 | enum amd_powergating_state state) | |
aaa36a97 AD |
1528 | { |
1529 | return 0; | |
1530 | } | |
1531 | ||
41c360f6 HR |
1532 | static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags) |
1533 | { | |
1534 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
1535 | int data; | |
1536 | ||
ce137c04 ML |
1537 | if (amdgpu_sriov_vf(adev)) |
1538 | *flags = 0; | |
1539 | ||
41c360f6 HR |
1540 | /* AMD_CG_SUPPORT_SDMA_MGCG */ |
1541 | data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]); | |
1542 | if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK)) | |
1543 | *flags |= AMD_CG_SUPPORT_SDMA_MGCG; | |
1544 | ||
1545 | /* AMD_CG_SUPPORT_SDMA_LS */ | |
1546 | data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]); | |
1547 | if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK) | |
1548 | *flags |= AMD_CG_SUPPORT_SDMA_LS; | |
1549 | } | |
1550 | ||
a1255107 | 1551 | static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { |
88a907d6 | 1552 | .name = "sdma_v3_0", |
aaa36a97 AD |
1553 | .early_init = sdma_v3_0_early_init, |
1554 | .late_init = NULL, | |
1555 | .sw_init = sdma_v3_0_sw_init, | |
1556 | .sw_fini = sdma_v3_0_sw_fini, | |
1557 | .hw_init = sdma_v3_0_hw_init, | |
1558 | .hw_fini = sdma_v3_0_hw_fini, | |
1559 | .suspend = sdma_v3_0_suspend, | |
1560 | .resume = sdma_v3_0_resume, | |
1561 | .is_idle = sdma_v3_0_is_idle, | |
1562 | .wait_for_idle = sdma_v3_0_wait_for_idle, | |
e702a680 CZ |
1563 | .check_soft_reset = sdma_v3_0_check_soft_reset, |
1564 | .pre_soft_reset = sdma_v3_0_pre_soft_reset, | |
1565 | .post_soft_reset = sdma_v3_0_post_soft_reset, | |
aaa36a97 | 1566 | .soft_reset = sdma_v3_0_soft_reset, |
aaa36a97 AD |
1567 | .set_clockgating_state = sdma_v3_0_set_clockgating_state, |
1568 | .set_powergating_state = sdma_v3_0_set_powergating_state, | |
41c360f6 | 1569 | .get_clockgating_state = sdma_v3_0_get_clockgating_state, |
aaa36a97 AD |
1570 | }; |
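/*
 * The amd_ip_funcs hooks above are driven by the amdgpu IP dispatcher
 * in lifecycle order (early_init -> sw_init -> hw_init, reversed on
 * teardown); the check/pre/post soft-reset trio lets the core quiesce
 * and recover just this IP block.
 */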
1571 | ||
aaa36a97 | 1572 | static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { |
21cd942e | 1573 | .type = AMDGPU_RING_TYPE_SDMA, |
79887142 CK |
1574 | .align_mask = 0xf, |
1575 | .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), | |
536fbf94 | 1576 | .support_64bit_ptrs = false, |
aaa36a97 AD |
1577 | .get_rptr = sdma_v3_0_ring_get_rptr, |
1578 | .get_wptr = sdma_v3_0_ring_get_wptr, | |
1579 | .set_wptr = sdma_v3_0_ring_set_wptr, | |
e12f3d7a CK |
1580 | .emit_frame_size = |
1581 | 6 + /* sdma_v3_0_ring_emit_hdp_flush */ | |
2ee150cd | 1582 | 3 + /* hdp invalidate */ |
e12f3d7a | 1583 | 6 + /* sdma_v3_0_ring_emit_pipeline_sync */ |
49135593 | 1584 | VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v3_0_ring_emit_vm_flush */ |
e12f3d7a CK |
1585 | 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */ |
1586 | .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */ | |
aaa36a97 AD |
1587 | .emit_ib = sdma_v3_0_ring_emit_ib, |
1588 | .emit_fence = sdma_v3_0_ring_emit_fence, | |
00b7c4ff | 1589 | .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync, |
aaa36a97 | 1590 | .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, |
d2edb07b | 1591 | .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush, |
aaa36a97 AD |
1592 | .test_ring = sdma_v3_0_ring_test_ring, |
1593 | .test_ib = sdma_v3_0_ring_test_ib, | |
ac01db3d | 1594 | .insert_nop = sdma_v3_0_ring_insert_nop, |
9e5d5309 | 1595 | .pad_ib = sdma_v3_0_ring_pad_ib, |
3d31d4cb | 1596 | .emit_wreg = sdma_v3_0_ring_emit_wreg, |
aaa36a97 AD |
1597 | }; |
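/*
 * emit_frame_size and emit_ib_size are worst-case DWORD counts; the
 * ring code uses them when sizing the ring buffer and reserving space,
 * which is why the per-helper contributions are itemized above.
 */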
1598 | ||
1599 | static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) | |
1600 | { | |
c113ea1c AD |
1601 | int i; |
1602 | ||
1cf0abb6 | 1603 | for (i = 0; i < adev->sdma.num_instances; i++) { |
c113ea1c | 1604 | adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs; |
1cf0abb6 AD |
1605 | adev->sdma.instance[i].ring.me = i; |
1606 | } | |
aaa36a97 AD |
1607 | } |
1608 | ||
1609 | static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = { | |
1610 | .set = sdma_v3_0_set_trap_irq_state, | |
1611 | .process = sdma_v3_0_process_trap_irq, | |
1612 | }; | |
1613 | ||
1614 | static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = { | |
1615 | .process = sdma_v3_0_process_illegal_inst_irq, | |
1616 | }; | |
1617 | ||
1618 | static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) | |
1619 | { | |
c113ea1c AD |
1620 | adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; |
1621 | adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs; | |
1622 | adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs; | |
aaa36a97 AD |
1623 | } |
1624 | ||
1625 | /** | |
1626 | * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine | |
1627 | * | |
1628 | * @ib: indirect buffer to fill with commands | |
1629 | * @src_offset: src GPU address | |
1630 | * @dst_offset: dst GPU address | |
1631 | * @byte_count: number of bytes to xfer | |
1632 | * | |
1633 | * Copy GPU buffers using the DMA engine (VI). | |
1634 | * Used by the amdgpu ttm implementation to move pages if | |
1635 | * registered as the asic copy callback. | |
1636 | */ | |
c7ae72c0 | 1637 | static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib, |
aaa36a97 AD |
1638 | uint64_t src_offset, |
1639 | uint64_t dst_offset, | |
1640 | uint32_t byte_count) | |
1641 | { | |
c7ae72c0 CZ |
1642 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | |
1643 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); | |
1644 | ib->ptr[ib->length_dw++] = byte_count; | |
1645 | ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ | |
1646 | ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); | |
1647 | ib->ptr[ib->length_dw++] = upper_32_bits(src_offset); | |
1648 | ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); | |
1649 | ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); | |
aaa36a97 AD |
1650 | } |
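/* 7 DWORDs emitted, matching .copy_num_dw in sdma_v3_0_buffer_funcs below */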
1651 | ||
1652 | /** | |
1653 | * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine | |
1654 | * | |
1655 | * @ib: indirect buffer to fill with commands | |
1656 | * @src_data: value to write to buffer | |
1657 | * @dst_offset: dst GPU address | |
1658 | * @byte_count: number of bytes to fill | |
1659 | * | |
1660 | * Fill GPU buffers using the DMA engine (VI). | |
1661 | */ | |
6e7a3840 | 1662 | static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib, |
aaa36a97 AD |
1663 | uint32_t src_data, |
1664 | uint64_t dst_offset, | |
1665 | uint32_t byte_count) | |
1666 | { | |
6e7a3840 CZ |
1667 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL); |
1668 | ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); | |
1669 | ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); | |
1670 | ib->ptr[ib->length_dw++] = src_data; | |
1671 | ib->ptr[ib->length_dw++] = byte_count; | |
aaa36a97 AD |
1672 | } |
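/* 5 DWORDs emitted, matching .fill_num_dw in sdma_v3_0_buffer_funcs below */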
1673 | ||
1674 | static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = { | |
dfe5c2b7 | 1675 | .copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */ |
aaa36a97 AD |
1676 | .copy_num_dw = 7, |
1677 | .emit_copy_buffer = sdma_v3_0_emit_copy_buffer, | |
1678 | ||
dfe5c2b7 | 1679 | .fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */ |
aaa36a97 AD |
1680 | .fill_num_dw = 5, |
1681 | .emit_fill_buffer = sdma_v3_0_emit_fill_buffer, | |
1682 | }; | |
1683 | ||
1684 | static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev) | |
1685 | { | |
f54b30d7 CK |
1686 | adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; |
1687 | adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; | |
aaa36a97 AD |
1688 | } |
1689 | ||
1690 | static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { | |
e6d92197 | 1691 | .copy_pte_num_dw = 7, |
aaa36a97 | 1692 | .copy_pte = sdma_v3_0_vm_copy_pte, |
e6d92197 | 1693 | |
aaa36a97 AD |
1694 | .write_pte = sdma_v3_0_vm_write_pte, |
1695 | .set_pte_pde = sdma_v3_0_vm_set_pte_pde, | |
aaa36a97 AD |
1696 | }; |
1697 | ||
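/*
 * Expose every SDMA ring's scheduler to the VM manager so page-table
 * update jobs can be spread across all available instances.
 */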
1698 | static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) | |
1699 | { | |
2d55e45a CK |
1700 | unsigned i; |
1701 | ||
f54b30d7 CK |
1702 | adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; |
1703 | for (i = 0; i < adev->sdma.num_instances; i++) { | |
0c88b430 ND |
1704 | adev->vm_manager.vm_pte_scheds[i] = |
1705 | &adev->sdma.instance[i].ring.sched; | |
aaa36a97 | 1706 | } |
0c88b430 | 1707 | adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; |
aaa36a97 | 1708 | } |
a1255107 AD |
1709 | |
1710 | const struct amdgpu_ip_block_version sdma_v3_0_ip_block = | |
1711 | { | |
1712 | .type = AMD_IP_BLOCK_TYPE_SDMA, | |
1713 | .major = 3, | |
1714 | .minor = 0, | |
1715 | .rev = 0, | |
1716 | .funcs = &sdma_v3_0_ip_funcs, | |
1717 | }; | |
1718 | ||
1719 | const struct amdgpu_ip_block_version sdma_v3_1_ip_block = | |
1720 | { | |
1721 | .type = AMD_IP_BLOCK_TYPE_SDMA, | |
1722 | .major = 3, | |
1723 | .minor = 1, | |
1724 | .rev = 0, | |
1725 | .funcs = &sdma_v3_0_ip_funcs, | |
1726 | }; |
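/*
 * These IP block descriptors are registered by the ASIC setup code.
 * A minimal sketch of such a call site (modeled on vi.c of the same
 * era; the exact switch structure is an assumption):
 *
 *	case CHIP_TONGA:
 *		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
 *		break;
 *	case CHIP_POLARIS10:
 *		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
 *		break;
 */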