Commit | Line | Data |
---|---|---|
a2e73f56 AD |
1 | /* |
2 | * Copyright 2011 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: Alex Deucher | |
23 | */ | |
24 | ||
25 | #include <linux/firmware.h> | |
26 | #include "drmP.h" | |
27 | #include "amdgpu.h" | |
28 | #include "cikd.h" | |
29 | #include "ppsmc.h" | |
30 | #include "amdgpu_ucode.h" | |
31 | #include "ci_dpm.h" | |
32 | ||
33 | #include "smu/smu_7_0_1_d.h" | |
34 | #include "smu/smu_7_0_1_sh_mask.h" | |
35 | ||
36 | static int ci_set_smc_sram_address(struct amdgpu_device *adev, | |
37 | u32 smc_address, u32 limit) | |
38 | { | |
39 | if (smc_address & 3) | |
40 | return -EINVAL; | |
41 | if ((smc_address + 3) > limit) | |
42 | return -EINVAL; | |
43 | ||
44 | WREG32(mmSMC_IND_INDEX_0, smc_address); | |
45 | WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); | |
46 | ||
47 | return 0; | |
48 | } | |
49 | ||
/**
 * amdgpu_ci_copy_bytes_to_smc - write a byte buffer into SMC SRAM
 * @adev: amdgpu device handle
 * @smc_start_address: destination SMC SRAM address (must be dword-aligned)
 * @src: source buffer in CPU memory
 * @byte_count: number of bytes to copy
 * @limit: highest SMC SRAM address that may be written
 *
 * Copies the buffer a dword at a time through the indirect
 * SMC_IND_INDEX_0/SMC_IND_DATA_0 register pair.  A trailing 1-3 bytes
 * are merged into the existing SRAM dword via read-modify-write so the
 * untouched bytes are preserved.
 *
 * Returns 0 on success, -EINVAL for a misaligned start address or a
 * range that exceeds @limit.
 */
int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
				u32 smc_start_address,
				const u8 *src, u32 byte_count, u32 limit)
{
	unsigned long flags;
	u32 data, original_data;
	u32 addr;
	u32 extra_shift;
	int ret = 0;

	if (smc_start_address & 3)
		return -EINVAL;
	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	/* serialize all users of the shared index/data register pair */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];

		ret = ci_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* RMW for the final bytes */
	if (byte_count > 0) {
		data = 0;

		ret = ci_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		/* read the current dword so its low bytes can be kept */
		original_data = RREG32(mmSMC_IND_DATA_0);

		/* number of bits NOT covered by the remaining src bytes */
		extra_shift = 8 * (4 - byte_count);

		/* pack the remaining bytes big-endian, MSB first */
		while (byte_count > 0) {
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		/* preserve the original low extra_shift bits */
		data |= (original_data & ~((~0UL) << extra_shift));

		/* NOTE(review): address is re-programmed after the data
		 * read before writing back - presumably the read disturbs
		 * the index; confirm against register docs */
		ret = ci_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		WREG32(mmSMC_IND_DATA_0, data);
	}

done:
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}
116 | ||
117 | void amdgpu_ci_start_smc(struct amdgpu_device *adev) | |
118 | { | |
119 | u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); | |
120 | ||
121 | tmp &= ~SMC_SYSCON_RESET_CNTL__rst_reg_MASK; | |
122 | WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp); | |
123 | } | |
124 | ||
125 | void amdgpu_ci_reset_smc(struct amdgpu_device *adev) | |
126 | { | |
127 | u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); | |
128 | ||
129 | tmp |= SMC_SYSCON_RESET_CNTL__rst_reg_MASK; | |
130 | WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp); | |
131 | } | |
132 | ||
133 | int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev) | |
134 | { | |
135 | static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 }; | |
136 | ||
137 | return amdgpu_ci_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); | |
138 | } | |
139 | ||
140 | void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev) | |
141 | { | |
142 | u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | |
143 | ||
144 | tmp |= SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK; | |
145 | ||
146 | WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp); | |
147 | } | |
148 | ||
149 | void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev) | |
150 | { | |
151 | u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | |
152 | ||
153 | tmp &= ~SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK; | |
154 | ||
155 | WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp); | |
156 | } | |
157 | ||
158 | bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev) | |
159 | { | |
160 | u32 clk = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | |
161 | u32 pc_c = RREG32_SMC(ixSMC_PC_C); | |
162 | ||
163 | if (!(clk & SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK) && (0x20100 <= pc_c)) | |
164 | return true; | |
165 | ||
166 | return false; | |
167 | } | |
168 | ||
/**
 * amdgpu_ci_send_msg_to_smc - post a message to the SMC and collect the reply
 * @adev: amdgpu device handle
 * @msg: message id to write to SMC_MESSAGE_0
 *
 * Returns PPSMC_Result_Failed if the SMC is not running, otherwise the
 * value read back from SMC_RESP_0 cast to PPSMC_Result.  On poll timeout
 * the final read may still be 0 (no response).
 */
PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
	u32 tmp;
	int i;

	/* the SMC cannot answer while it is halted or clock-gated */
	if (!amdgpu_ci_is_smc_running(adev))
		return PPSMC_Result_Failed;

	WREG32(mmSMC_MESSAGE_0, msg);

	/* poll for a non-zero response, up to usec_timeout microseconds */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSMC_RESP_0);
		if (tmp != 0)
			break;
		udelay(1);
	}
	/* final authoritative read; 0 here means the loop timed out */
	tmp = RREG32(mmSMC_RESP_0);

	return (PPSMC_Result)tmp;
}
189 | ||
/**
 * amdgpu_ci_wait_for_smc_inactive - wait for the SMC clock-enable to drop
 * @adev: amdgpu device handle
 *
 * Polls SMC_SYSCON_CLOCK_CNTL_0 until the cken bit clears or
 * usec_timeout microseconds elapse.
 *
 * Always returns PPSMC_Result_OK.
 * NOTE(review): a poll timeout is not reported - callers cannot tell
 * whether the SMC actually went inactive; confirm this is intentional.
 */
PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	/* an SMC that is not running is already inactive */
	if (!amdgpu_ci_is_smc_running(adev))
		return PPSMC_Result_OK;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
		if ((tmp & SMC_SYSCON_CLOCK_CNTL_0__cken_MASK) == 0)
			break;
		udelay(1);
	}

	return PPSMC_Result_OK;
}
207 | ||
/**
 * amdgpu_ci_load_smc_ucode - upload SMC firmware into SMC SRAM
 * @adev: amdgpu device handle
 * @limit: SRAM size bound
 *
 * Parses the v1.0 SMC firmware header attached to adev->pm.fw, records
 * the firmware version, and bursts the ucode image into SMC SRAM using
 * the auto-increment mode of the indirect index/data registers.
 *
 * Returns 0 on success, -EINVAL if no firmware is loaded or the ucode
 * size is not a multiple of 4.
 *
 * NOTE(review): @limit is never checked against
 * ucode_start_address + ucode_size here, unlike the bounds check in
 * amdgpu_ci_copy_bytes_to_smc - confirm this is intentional.
 */
int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
{
	const struct smc_firmware_header_v1_0 *hdr;
	unsigned long flags;
	u32 ucode_start_address;
	u32 ucode_size;
	const u8 *src;
	u32 data;

	if (!adev->pm.fw)
		return -EINVAL;

	/* the firmware blob begins with a v1.0 SMC header */
	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);

	/* header fields are stored little-endian in the blob */
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
	src = (const u8 *)
		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	/* the burst loop below only handles whole dwords */
	if (ucode_size & 3)
		return -EINVAL;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
	/* enable auto-increment so the index advances on each data write */
	WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK,
		~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
	while (ucode_size >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		ucode_size -= 4;
	}
	/* restore single-access mode before releasing the lock */
	WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return 0;
}
250 | ||
251 | int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev, | |
252 | u32 smc_address, u32 *value, u32 limit) | |
253 | { | |
254 | unsigned long flags; | |
255 | int ret; | |
256 | ||
257 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | |
258 | ret = ci_set_smc_sram_address(adev, smc_address, limit); | |
259 | if (ret == 0) | |
260 | *value = RREG32(mmSMC_IND_DATA_0); | |
261 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | |
262 | ||
263 | return ret; | |
264 | } | |
265 | ||
266 | int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev, | |
267 | u32 smc_address, u32 value, u32 limit) | |
268 | { | |
269 | unsigned long flags; | |
270 | int ret; | |
271 | ||
272 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | |
273 | ret = ci_set_smc_sram_address(adev, smc_address, limit); | |
274 | if (ret == 0) | |
275 | WREG32(mmSMC_IND_DATA_0, value); | |
276 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | |
277 | ||
278 | return ret; | |
279 | } |