Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-block.git] / drivers / gpu / drm / amd / amdgpu / cz_smc.c
CommitLineData
aaa36a97
AD
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/firmware.h>
24#include "drmP.h"
25#include "amdgpu.h"
26#include "smu8.h"
27#include "smu8_fusion.h"
28#include "cz_ppsmc.h"
29#include "cz_smumgr.h"
30#include "smu_ucode_xfer_cz.h"
31#include "amdgpu_ucode.h"
9ca91fdd
BX
32#include "cz_dpm.h"
33#include "vi_dpm.h"
aaa36a97
AD
34
35#include "smu/smu_8_0_d.h"
36#include "smu/smu_8_0_sh_mask.h"
37#include "gca/gfx_8_0_d.h"
38#include "gca/gfx_8_0_sh_mask.h"
39
40uint32_t cz_get_argument(struct amdgpu_device *adev)
41{
42 return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
43}
44
45static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
46{
47 struct cz_smu_private_data *priv =
48 (struct cz_smu_private_data *)(adev->smu.priv);
49
50 return priv;
51}
52
761c2e82 53static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
aaa36a97
AD
54{
55 int i;
56 u32 content = 0, tmp;
57
58 for (i = 0; i < adev->usec_timeout; i++) {
59 tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
60 SMU_MP1_SRBM2P_RESP_0, CONTENT);
61 if (content != tmp)
62 break;
63 udelay(1);
64 }
65
66 /* timeout means wrong logic*/
67 if (i == adev->usec_timeout)
68 return -EINVAL;
69
70 WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
71 WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);
72
73 return 0;
74}
75
76int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
77{
78 int i;
79 u32 content = 0, tmp = 0;
80
81 if (cz_send_msg_to_smc_async(adev, msg))
82 return -EINVAL;
83
84 for (i = 0; i < adev->usec_timeout; i++) {
85 tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
86 SMU_MP1_SRBM2P_RESP_0, CONTENT);
87 if (content != tmp)
88 break;
89 udelay(1);
90 }
91
92 /* timeout means wrong logic*/
93 if (i == adev->usec_timeout)
94 return -EINVAL;
95
96 if (PPSMC_Result_OK != tmp) {
97 dev_err(adev->dev, "SMC Failed to send Message.\n");
98 return -EINVAL;
99 }
100
101 return 0;
102}
103
aaa36a97
AD
104int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
105 u16 msg, u32 parameter)
106{
107 WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
108 return cz_send_msg_to_smc(adev, msg);
109}
110
111static int cz_set_smc_sram_address(struct amdgpu_device *adev,
112 u32 smc_address, u32 limit)
113{
114 if (smc_address & 3)
115 return -EINVAL;
116 if ((smc_address + 3) > limit)
117 return -EINVAL;
118
119 WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
120
121 return 0;
122}
123
124int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
125 u32 *value, u32 limit)
126{
127 int ret;
128
129 ret = cz_set_smc_sram_address(adev, smc_address, limit);
130 if (ret)
131 return ret;
132
133 *value = RREG32(mmMP0PUB_IND_DATA_0);
134
135 return 0;
136}
137
761c2e82 138static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
aaa36a97
AD
139 u32 value, u32 limit)
140{
141 int ret;
142
143 ret = cz_set_smc_sram_address(adev, smc_address, limit);
144 if (ret)
145 return ret;
146
147 WREG32(mmMP0PUB_IND_DATA_0, value);
148
149 return 0;
150}
151
/*
 * Ask the SMU to load the firmware images described by the TOC buffer.
 *
 * Clears the UcodeLoadStatus word in SMU SRAM, hands the SMU the MC address
 * of the TOC, initializes the job lists, and then executes the ARAM-save,
 * power-profiling and bootup jobs by their recorded TOC indices.
 * The message sequence (DramAddrHi, DramAddrLo, InitJobs, ExecuteJob...)
 * is fixed; do not reorder.
 */
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	/* SRAM offset of UcodeLoadStatus inside the SMU8 firmware header. */
	uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	/* Zero the load-status word so completion can be polled afterwards. */
	cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);

	/*prepare toc buffers*/
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrHi,
				priv->toc_buffer.mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrLo,
				priv->toc_buffer.mc_addr_low);
	cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);

	/*execute jobs*/
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_aram);

	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_power_profiling_index);

	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_initialize_index);

	return 0;
}
185
/*
 * Check if the FW has been loaded; the SMU will not return if loading
 * has not finished.
 *
 * Polls the UcodeLoadStatus word in SMU SRAM until all bits in @fw_mask
 * are set, or adev->usec_timeout microseconds have elapsed.
 * Returns 0 when every requested image is reported loaded, -EINVAL on
 * timeout.
 */
static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
				uint32_t fw_mask)
{
	int i;
	/* Absolute SMN address of UcodeLoadStatus in the firmware header. */
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			SMU8_FIRMWARE_HEADER_LOCATION +
			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	/*
	 * NOTE(review): this uses mmMP0PUB_IND_INDEX/DATA while
	 * cz_set_smc_sram_address() uses the _0 variants — confirm both
	 * register pairs alias the same indirect-access port.
	 */
	WREG32(mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < adev->usec_timeout; i++) {
		/* All bits of fw_mask must be set before we consider it done. */
		if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev,
		"SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
		fw_mask, RREG32(mmMP0PUB_IND_DATA));
		return -EINVAL;
	}

	return 0;
}
215
216/*
217 * interfaces for different ip blocks to check firmware loading status
218 * 0 for success otherwise failed
219 */
220static int cz_smu_check_finished(struct amdgpu_device *adev,
221 enum AMDGPU_UCODE_ID id)
222{
223 switch (id) {
224 case AMDGPU_UCODE_ID_SDMA0:
225 if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
226 return 0;
227 break;
228 case AMDGPU_UCODE_ID_SDMA1:
229 if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
230 return 0;
231 break;
232 case AMDGPU_UCODE_ID_CP_CE:
233 if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
234 return 0;
235 break;
236 case AMDGPU_UCODE_ID_CP_PFP:
237 if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
238 return 0;
239 case AMDGPU_UCODE_ID_CP_ME:
240 if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
241 return 0;
242 break;
243 case AMDGPU_UCODE_ID_CP_MEC1:
244 if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
245 return 0;
246 break;
247 case AMDGPU_UCODE_ID_CP_MEC2:
248 if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
249 return 0;
250 break;
251 case AMDGPU_UCODE_ID_RLC_G:
252 if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
253 return 0;
254 break;
255 case AMDGPU_UCODE_ID_MAXIMUM:
256 default:
257 break;
258 }
259
260 return 1;
261}
262
/*
 * Manually point the CP compute microcontroller (MEC) instruction cache at
 * the MEC1 ucode buffer. Halts both MEC pipes first, then programs the
 * instruction-cache control and 64-bit MC base address.
 * Returns -EINVAL if the MEC1 firmware has not been requested/loaded.
 */
static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info *ucode =
			&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	uint32_t reg_data;
	uint32_t tmp;

	if (ucode->fw == NULL)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	/* Instruction cache setup: VMID 0, ATC off, CACHE_POLICY 0, MTYPE 1. */
	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

	/* Program the MC address of the MEC1 image, low then high dword. */
	reg_data = lower_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
296
/*
 * Start SMU-driven firmware loading and record which images were loaded.
 *
 * Requests the SMU to load all TOC-described images, waits for the
 * load-status mask, manually loads the MEC image on Carrizo/Stoney, and
 * finally publishes the loaded-image flags in adev->smu.fw_flags.
 * Stoney has only one SDMA engine and one MEC, so the SDMA1/MEC_JT2 bits
 * are excluded from both the wait mask and the published flags.
 */
int cz_smu_start(struct amdgpu_device *adev)
{
	int ret = 0;

	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
				UCODE_ID_SDMA0_MASK |
				UCODE_ID_SDMA1_MASK |
				UCODE_ID_CP_CE_MASK |
				UCODE_ID_CP_ME_MASK |
				UCODE_ID_CP_PFP_MASK |
				UCODE_ID_CP_MEC_JT1_MASK |
				UCODE_ID_CP_MEC_JT2_MASK;

	if (adev->asic_type == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	cz_smu_request_load_fw(adev);
	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
	if (ret)
		return ret;

	/* manually load MEC firmware for CZ */
	if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
		ret = cz_load_mec_firmware(adev);
		if (ret) {
			dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
			return ret;
		}
	}

	/* setup fw load flag */
	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
				AMDGPU_SDMA1_UCODE_LOADED |
				AMDGPU_CPCE_UCODE_LOADED |
				AMDGPU_CPPFP_UCODE_LOADED |
				AMDGPU_CPME_UCODE_LOADED |
				AMDGPU_CPMEC1_UCODE_LOADED |
				AMDGPU_CPMEC2_UCODE_LOADED |
				AMDGPU_CPRLC_UCODE_LOADED;

	if (adev->asic_type == CHIP_STONEY)
		adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);

	return ret;
}
342
343static uint32_t cz_convert_fw_type(uint32_t fw_type)
344{
345 enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
346
347 switch (fw_type) {
348 case UCODE_ID_SDMA0:
349 result = AMDGPU_UCODE_ID_SDMA0;
350 break;
351 case UCODE_ID_SDMA1:
352 result = AMDGPU_UCODE_ID_SDMA1;
353 break;
354 case UCODE_ID_CP_CE:
355 result = AMDGPU_UCODE_ID_CP_CE;
356 break;
357 case UCODE_ID_CP_PFP:
358 result = AMDGPU_UCODE_ID_CP_PFP;
359 break;
360 case UCODE_ID_CP_ME:
361 result = AMDGPU_UCODE_ID_CP_ME;
362 break;
363 case UCODE_ID_CP_MEC_JT1:
364 case UCODE_ID_CP_MEC_JT2:
365 result = AMDGPU_UCODE_ID_CP_MEC1;
366 break;
367 case UCODE_ID_RLC_G:
368 result = AMDGPU_UCODE_ID_RLC_G;
369 break;
370 default:
371 DRM_ERROR("UCode type is out of range!");
372 }
373
374 return result;
375}
376
377static uint8_t cz_smu_translate_firmware_enum_to_arg(
378 enum cz_scratch_entry firmware_enum)
379{
380 uint8_t ret = 0;
381
382 switch (firmware_enum) {
383 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
384 ret = UCODE_ID_SDMA0;
385 break;
386 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
387 ret = UCODE_ID_SDMA1;
388 break;
389 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
390 ret = UCODE_ID_CP_CE;
391 break;
392 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
393 ret = UCODE_ID_CP_PFP;
394 break;
395 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
396 ret = UCODE_ID_CP_ME;
397 break;
398 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
399 ret = UCODE_ID_CP_MEC_JT1;
400 break;
401 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
402 ret = UCODE_ID_CP_MEC_JT2;
403 break;
404 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
405 ret = UCODE_ID_GMCON_RENG;
406 break;
407 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
408 ret = UCODE_ID_RLC_G;
409 break;
410 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
411 ret = UCODE_ID_RLC_SCRATCH;
412 break;
413 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
414 ret = UCODE_ID_RLC_SRM_ARAM;
415 break;
416 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
417 ret = UCODE_ID_RLC_SRM_DRAM;
418 break;
419 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
420 ret = UCODE_ID_DMCU_ERAM;
421 break;
422 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
423 ret = UCODE_ID_DMCU_IRAM;
424 break;
425 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
426 ret = TASK_ARG_INIT_MM_PWR_LOG;
427 break;
428 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
429 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
430 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
431 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
432 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
433 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
434 ret = TASK_ARG_REG_MMIO;
435 break;
436 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
437 ret = TASK_ARG_INIT_CLK_TABLE;
438 break;
439 }
440
441 return ret;
442}
443
/*
 * Fill a cz_buffer_entry describing one firmware image for the SMU TOC.
 *
 * Looks up the amdgpu_firmware_info for @firmware_enum, records the MC
 * address and ucode byte size, and for the MEC jump-table entries narrows
 * the range to just the jump table within the MEC image.
 * Returns -EINVAL if the firmware has not been fetched.
 */
static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
					enum cz_scratch_entry firmware_enum,
					struct cz_buffer_entry *entry)
{
	uint64_t gpu_addr;
	uint32_t data_size;
	uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	/* JT entries reference only the jump table inside the MEC image
	 * (offsets in the header are in dwords, hence << 2). */
	if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
	    (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->mc_addr_low = lower_32_bits(gpu_addr);
	entry->mc_addr_high = upper_32_bits(gpu_addr);
	entry->data_size = data_size;
	entry->firmware_ID = firmware_enum;

	return 0;
}
475
/*
 * Carve a scratch region of @size_in_byte out of the shared SMU buffer and
 * describe it in @entry.
 */
static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
					enum cz_scratch_entry scratch_type,
					uint32_t size_in_byte,
					struct cz_buffer_entry *entry)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
						priv->smu_buffer.mc_addr_low;
	/*
	 * NOTE(review): the MC address is advanced by this entry's own size,
	 * while kaddr below is offset by the post-increment used-bytes total.
	 * These only agree for the first allocation; confirm whether mc_addr
	 * should instead be offset by priv->smu_buffer_used_bytes.
	 */
	mc_addr += size_in_byte;

	priv->smu_buffer_used_bytes += size_in_byte;
	entry->data_size = size_in_byte;
	/* kaddr points past the space consumed so far (after the increment). */
	entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
	entry->mc_addr_low = lower_32_bits(mc_addr);
	entry->mc_addr_high = upper_32_bits(mc_addr);
	entry->firmware_ID = scratch_type;

	return 0;
}
495
/*
 * Append a TASK_TYPE_UCODE_LOAD task for @firmware_enum to the TOC.
 *
 * The task source address/size come from the matching driver_buffer entry
 * created earlier by cz_smu_populate_single_firmware_entry().
 * @is_last terminates the task chain; otherwise the task links to the next
 * TOC slot.
 */
static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
						enum cz_scratch_entry firmware_enum,
						bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	/* NOTE(review): the TOC slot is consumed before the firmware lookup
	 * below is validated, so a failed lookup leaves a partially written
	 * task in the chain. */
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->driver_buffer_length; i++)
		if (priv->driver_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->driver_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->driver_buffer[i].mc_addr_low;
	task->addr.high = priv->driver_buffer[i].mc_addr_high;
	task->size_bytes = priv->driver_buffer[i].data_size;

	return 0;
}
524
/*
 * Append a scratch-buffer task of the given @type (load/save/initialize)
 * for @firmware_enum to the TOC. The task source address/size come from
 * the matching scratch_buffer entry created earlier.
 * @is_last terminates the task chain.
 */
static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
						enum cz_scratch_entry firmware_enum,
						uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->scratch_buffer[i].mc_addr_low;
	task->addr.high = priv->scratch_buffer[i].mc_addr_high;
	task->size_bytes = priv->scratch_buffer[i].data_size;

	/* The IH-register restore area additionally carries a command word
	 * telling the SMU to replay the registers on load. */
	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
560
561static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
562{
563 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
564 priv->toc_entry_aram = priv->toc_entry_used_count;
565 cz_smu_populate_single_scratch_task(adev,
566 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
567 TASK_TYPE_UCODE_SAVE, true);
568
569 return 0;
570}
571
572static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
573{
574 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
575 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
576
577 toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
578 cz_smu_populate_single_scratch_task(adev,
579 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
580 TASK_TYPE_UCODE_SAVE, false);
581 cz_smu_populate_single_scratch_task(adev,
582 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
583 TASK_TYPE_UCODE_SAVE, true);
584
585 return 0;
586}
587
/*
 * Build the GFX-restore job executed when leaving the VDDGFX power-gated
 * state: reload the CP/RLC ucode images, then restore the saved RLC
 * scratch/ARAM/DRAM contents. On Stoney (single MEC) the MEC_JT1 task is
 * queued twice in place of MEC_JT2.
 */
static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

	/* populate ucode */
	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
	}

	/* populate scratch */
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_LOAD, true);

	return 0;
}
629
630static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
631{
632 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
633
634 priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;
635
636 cz_smu_populate_single_scratch_task(adev,
637 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
638 TASK_TYPE_INITIALIZE, true);
639 return 0;
640}
641
/*
 * Build the boot-up job: load the SDMA, CP, and RLC ucode images in order.
 * On Stoney (single SDMA engine, single MEC) the SDMA0 and MEC_JT1 tasks
 * are queued twice in place of SDMA1/MEC_JT2. RLC_G terminates the chain.
 * The job's starting TOC index is recorded for cz_smu_request_load_fw().
 */
static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_initialize_index = priv->toc_entry_used_count;

	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
	}

	return 0;
}
679
680static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
681{
682 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
683
684 priv->toc_entry_clock_table = priv->toc_entry_used_count;
685
686 cz_smu_populate_single_scratch_task(adev,
687 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
688 TASK_TYPE_INITIALIZE, true);
689
690 return 0;
691}
692
693static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
694{
695 int i;
696 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
697 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
698
699 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
700 toc->JobList[i] = (uint8_t)IGNORE_JOB;
701
702 return 0;
703}
704
/*
 * cz smu uninitialization
 *
 * Releases the TOC and internal SMU buffer objects, frees the private
 * bookkeeping, and tears down the shared ucode BO when SMU loading was
 * in use. Always returns 0.
 */
int cz_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.smu_load)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
719
/*
 * Ask the SMU to copy its clock table into the driver's scratch buffer and
 * return a pointer to it via *@table.
 *
 * Locates the CLKTABLE scratch entry, programs its MC address into the SMU,
 * executes the clock-table TOC job, and triggers the SMU-to-DRAM transfer.
 * Returns -EINVAL if no clock-table scratch entry exists.
 */
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

	/* prepare buffer for pptable */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual downloading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

	return 0;
}
753
/*
 * Push the (driver-modified) clock table from the scratch buffer back to
 * the SMU.
 *
 * Mirror of cz_smu_download_pptable(): programs the CLKTABLE scratch
 * entry's MC address, executes the clock-table TOC job, then triggers the
 * DRAM-to-SMU transfer. Returns -EINVAL if no clock-table entry exists.
 */
int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	/* prepare SMU */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual uploading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

	return 0;
}
785
/*
 * cz smumgr functions initialization
 *
 * Only the per-ucode load-status query is provided; firmware loading is
 * driven directly by cz_smu_start() rather than through smumgr callbacks.
 */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
	.check_fw_load_finish = cz_smu_check_finished,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};
794
/*
 * cz smu initialization
 *
 * Allocates the private bookkeeping, creates/pins/maps the TOC buffer and
 * the shared scratch ("smu") buffer in GTT, registers the firmware images
 * and scratch regions, builds all TOC jobs, and installs the smumgr
 * callbacks. Returns 0 on success or a negative errno.
 */
int cz_smu_init(struct amdgpu_device *adev)
{
	int ret = -EINVAL;
	uint64_t mc_addr = 0;
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
	void *toc_buf_ptr = NULL;
	void *smu_buf_ptr = NULL;

	struct cz_smu_private_data *priv =
		kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = priv;
	adev->smu.fw_flags = 0;
	priv->toc_buffer.data_size = 4096;

	/* Scratch buffer sized for all save/restore regions, each 32-byte
	 * aligned: RLC scratch/ARAM/DRAM, power log, and clock table. */
	priv->smu_buffer.data_size =
			ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
			ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
			ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
			ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
			ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	/* prepare toc buffer and smu buffer:
	* 1. create amdgpu_bo for toc buffer and smu buffer
	* 2. pin mc address
	* 3. map kernel virtual address
	*/
	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       toc_buf);

	if (ret) {
		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
		return ret;
	}

	/* NOTE(review): if this allocation fails, the toc_buf created above
	 * is not released here — confirm it is reclaimed via cz_smu_fini(). */
	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       smu_buf);

	if (ret) {
		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
		return ret;
	}

	/* toc buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
		return ret;
	}

	/* NOTE(review): on kmap failure this jumps to smu_init_failed while
	 * the bo is still reserved — verify the unref path tolerates that. */
	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.toc_buf);

	priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->toc_buffer.kaddr = toc_buf_ptr;

	/* smu buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.smu_buf);

	priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->smu_buffer.kaddr = smu_buf_ptr;

	/* Register every firmware image the SMU will load. On Stoney the
	 * SDMA0 and MEC_JT1 entries are registered twice in place of
	 * SDMA1/MEC_JT2 (single SDMA engine, single MEC). */
	if (adev->firmware.smu_load) {
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;

		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
	}

	/* Carve the scratch buffer into the save/restore regions. */
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
				sizeof(struct SMU8_MultimediaPowerLogData),
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
				sizeof(struct SMU8_Fusion_ClkTable),
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;

	/* Build all TOC jobs, then install the smumgr callbacks. */
	cz_smu_initialize_toc_empty_job_list(adev);
	cz_smu_construct_toc_for_rlc_aram_save(adev);
	cz_smu_construct_toc_for_vddgfx_enter(adev);
	cz_smu_construct_toc_for_vddgfx_exit(adev);
	cz_smu_construct_toc_for_power_profiling(adev);
	cz_smu_construct_toc_for_bootup(adev);
	cz_smu_construct_toc_for_clock_table(adev);
	/* init the smumgr functions */
	adev->smu.smumgr_funcs = &cz_smumgr_funcs;

	return 0;

smu_init_failed:
	amdgpu_bo_unref(toc_buf);
	amdgpu_bo_unref(smu_buf);

	return ret;
}