2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/delay.h>
25 #include <linux/gfp.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/types.h>
30 #include "cgs_common.h"
31 #include "smu/smu_8_0_d.h"
32 #include "smu/smu_8_0_sh_mask.h"
34 #include "smu8_fusion.h"
35 #include "cz_smumgr.h"
37 #include "smu_ucode_xfer_cz.h"
38 #include "gca/gfx_8_0_d.h"
39 #include "gca/gfx_8_0_sh_mask.h"
/* Round x up to the next multiple of 32 (byte alignment for SMU scratch areas). */
#define SIZE_ALIGN_32(x)    (((x) + 31) / 32 * 32)
44 static const enum cz_scratch_entry firmware_list[] = {
45 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
46 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
47 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
48 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
49 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
50 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
51 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
52 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
55 static int cz_smum_get_argument(struct pp_smumgr *smumgr)
57 if (smumgr == NULL || smumgr->device == NULL)
60 return cgs_read_register(smumgr->device,
61 mmSMU_MP1_SRBM2P_ARG_0);
64 static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
69 if (smumgr == NULL || smumgr->device == NULL)
72 result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
73 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
75 pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
79 cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
80 cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
85 /* Send a message to the SMC, and wait for its response.*/
86 static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
90 result = cz_send_msg_to_smc_async(smumgr, msg);
94 return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
95 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
98 static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
99 uint32_t smc_address, uint32_t limit)
101 if (smumgr == NULL || smumgr->device == NULL)
104 if (0 != (3 & smc_address)) {
105 pr_err("SMC address must be 4 byte aligned\n");
109 if (limit <= (smc_address + 3)) {
110 pr_err("SMC address beyond the SMC RAM area\n");
114 cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
115 SMN_MP1_SRAM_START_ADDR + smc_address);
120 static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
121 uint32_t smc_address, uint32_t value, uint32_t limit)
125 if (smumgr == NULL || smumgr->device == NULL)
128 result = cz_set_smc_sram_address(smumgr, smc_address, limit);
130 cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
135 static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
136 uint16_t msg, uint32_t parameter)
138 if (smumgr == NULL || smumgr->device == NULL)
141 cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
143 return cz_send_msg_to_smc(smumgr, msg);
146 static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
150 uint32_t index = SMN_MP1_SRAM_START_ADDR +
151 SMU8_FIRMWARE_HEADER_LOCATION +
152 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
154 if (smumgr == NULL || smumgr->device == NULL)
157 cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
159 for (i = 0; i < smumgr->usec_timeout; i++) {
161 (cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware))
166 if (i >= smumgr->usec_timeout) {
167 pr_err("SMU check loaded firmware failed.\n");
174 static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
179 struct cgs_firmware_info info = {0};
180 struct cz_smumgr *cz_smu;
182 if (smumgr == NULL || smumgr->device == NULL)
185 cz_smu = (struct cz_smumgr *)smumgr->backend;
186 ret = cgs_get_firmware_info(smumgr->device,
187 CGS_UCODE_ID_CP_MEC, &info);
192 /* Disable MEC parsing/prefetching */
193 tmp = cgs_read_register(smumgr->device,
195 tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
196 tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
197 cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp);
199 tmp = cgs_read_register(smumgr->device,
200 mmCP_CPC_IC_BASE_CNTL);
202 tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
203 tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
204 tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
205 tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
206 cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
208 reg_data = smu_lower_32_bits(info.mc_addr) &
209 SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
210 cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
212 reg_data = smu_upper_32_bits(info.mc_addr) &
213 SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
214 cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
219 static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
220 enum cz_scratch_entry firmware_enum)
224 switch (firmware_enum) {
225 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
226 ret = UCODE_ID_SDMA0;
228 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
229 if (smumgr->chip_id == CHIP_STONEY)
230 ret = UCODE_ID_SDMA0;
232 ret = UCODE_ID_SDMA1;
234 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
235 ret = UCODE_ID_CP_CE;
237 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
238 ret = UCODE_ID_CP_PFP;
240 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
241 ret = UCODE_ID_CP_ME;
243 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
244 ret = UCODE_ID_CP_MEC_JT1;
246 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
247 if (smumgr->chip_id == CHIP_STONEY)
248 ret = UCODE_ID_CP_MEC_JT1;
250 ret = UCODE_ID_CP_MEC_JT2;
252 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
253 ret = UCODE_ID_GMCON_RENG;
255 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
256 ret = UCODE_ID_RLC_G;
258 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
259 ret = UCODE_ID_RLC_SCRATCH;
261 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
262 ret = UCODE_ID_RLC_SRM_ARAM;
264 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
265 ret = UCODE_ID_RLC_SRM_DRAM;
267 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
268 ret = UCODE_ID_DMCU_ERAM;
270 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
271 ret = UCODE_ID_DMCU_IRAM;
273 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
274 ret = TASK_ARG_INIT_MM_PWR_LOG;
276 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
277 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
278 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
279 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
280 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
281 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
282 ret = TASK_ARG_REG_MMIO;
284 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
285 ret = TASK_ARG_INIT_CLK_TABLE;
292 static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
294 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
298 result = CGS_UCODE_ID_SDMA0;
301 result = CGS_UCODE_ID_SDMA1;
304 result = CGS_UCODE_ID_CP_CE;
306 case UCODE_ID_CP_PFP:
307 result = CGS_UCODE_ID_CP_PFP;
310 result = CGS_UCODE_ID_CP_ME;
312 case UCODE_ID_CP_MEC_JT1:
313 result = CGS_UCODE_ID_CP_MEC_JT1;
315 case UCODE_ID_CP_MEC_JT2:
316 result = CGS_UCODE_ID_CP_MEC_JT2;
319 result = CGS_UCODE_ID_RLC_G;
328 static int cz_smu_populate_single_scratch_task(
329 struct pp_smumgr *smumgr,
330 enum cz_scratch_entry fw_enum,
331 uint8_t type, bool is_last)
334 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
335 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
336 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
339 task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
340 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
342 for (i = 0; i < cz_smu->scratch_buffer_length; i++)
343 if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
346 if (i >= cz_smu->scratch_buffer_length) {
347 pr_err("Invalid Firmware Type\n");
351 task->addr.low = cz_smu->scratch_buffer[i].mc_addr_low;
352 task->addr.high = cz_smu->scratch_buffer[i].mc_addr_high;
353 task->size_bytes = cz_smu->scratch_buffer[i].data_size;
355 if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
356 struct cz_ih_meta_data *pIHReg_restore =
357 (struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
358 pIHReg_restore->command =
359 METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
365 static int cz_smu_populate_single_ucode_load_task(
366 struct pp_smumgr *smumgr,
367 enum cz_scratch_entry fw_enum,
371 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
372 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
373 struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
375 task->type = TASK_TYPE_UCODE_LOAD;
376 task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
377 task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
379 for (i = 0; i < cz_smu->driver_buffer_length; i++)
380 if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
383 if (i >= cz_smu->driver_buffer_length) {
384 pr_err("Invalid Firmware Type\n");
388 task->addr.low = cz_smu->driver_buffer[i].mc_addr_low;
389 task->addr.high = cz_smu->driver_buffer[i].mc_addr_high;
390 task->size_bytes = cz_smu->driver_buffer[i].data_size;
395 static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr)
397 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
399 cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
400 cz_smu_populate_single_scratch_task(smumgr,
401 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
402 TASK_TYPE_UCODE_SAVE, true);
407 static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
410 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
411 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
413 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
414 toc->JobList[i] = (uint8_t)IGNORE_JOB;
419 static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
421 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
422 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
424 toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
425 cz_smu_populate_single_scratch_task(smumgr,
426 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
427 TASK_TYPE_UCODE_SAVE, false);
429 cz_smu_populate_single_scratch_task(smumgr,
430 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
431 TASK_TYPE_UCODE_SAVE, true);
437 static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
439 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
440 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
442 toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
444 cz_smu_populate_single_ucode_load_task(smumgr,
445 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
446 cz_smu_populate_single_ucode_load_task(smumgr,
447 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
448 cz_smu_populate_single_ucode_load_task(smumgr,
449 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
450 cz_smu_populate_single_ucode_load_task(smumgr,
451 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
453 if (smumgr->chip_id == CHIP_STONEY)
454 cz_smu_populate_single_ucode_load_task(smumgr,
455 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
457 cz_smu_populate_single_ucode_load_task(smumgr,
458 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
460 cz_smu_populate_single_ucode_load_task(smumgr,
461 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
463 /* populate scratch */
464 cz_smu_populate_single_scratch_task(smumgr,
465 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
466 TASK_TYPE_UCODE_LOAD, false);
468 cz_smu_populate_single_scratch_task(smumgr,
469 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
470 TASK_TYPE_UCODE_LOAD, false);
472 cz_smu_populate_single_scratch_task(smumgr,
473 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
474 TASK_TYPE_UCODE_LOAD, true);
479 static int cz_smu_construct_toc_for_power_profiling(
480 struct pp_smumgr *smumgr)
482 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
484 cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
486 cz_smu_populate_single_scratch_task(smumgr,
487 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
488 TASK_TYPE_INITIALIZE, true);
492 static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
494 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
496 cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
498 cz_smu_populate_single_ucode_load_task(smumgr,
499 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
500 if (smumgr->chip_id != CHIP_STONEY)
501 cz_smu_populate_single_ucode_load_task(smumgr,
502 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
503 cz_smu_populate_single_ucode_load_task(smumgr,
504 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
505 cz_smu_populate_single_ucode_load_task(smumgr,
506 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
507 cz_smu_populate_single_ucode_load_task(smumgr,
508 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
509 cz_smu_populate_single_ucode_load_task(smumgr,
510 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
511 if (smumgr->chip_id != CHIP_STONEY)
512 cz_smu_populate_single_ucode_load_task(smumgr,
513 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
514 cz_smu_populate_single_ucode_load_task(smumgr,
515 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
520 static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr)
522 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
524 cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
526 cz_smu_populate_single_scratch_task(smumgr,
527 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
528 TASK_TYPE_INITIALIZE, true);
533 static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
535 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
537 cz_smu->toc_entry_used_count = 0;
538 cz_smu_initialize_toc_empty_job_list(smumgr);
539 cz_smu_construct_toc_for_rlc_aram_save(smumgr);
540 cz_smu_construct_toc_for_vddgfx_enter(smumgr);
541 cz_smu_construct_toc_for_vddgfx_exit(smumgr);
542 cz_smu_construct_toc_for_power_profiling(smumgr);
543 cz_smu_construct_toc_for_bootup(smumgr);
544 cz_smu_construct_toc_for_clock_table(smumgr);
549 static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
551 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
552 uint32_t firmware_type;
555 enum cgs_ucode_id ucode_id;
556 struct cgs_firmware_info info = {0};
558 cz_smu->driver_buffer_length = 0;
560 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
562 firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
565 ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
567 ret = cgs_get_firmware_info(smumgr->device,
571 cz_smu->driver_buffer[i].mc_addr_high =
572 smu_upper_32_bits(info.mc_addr);
574 cz_smu->driver_buffer[i].mc_addr_low =
575 smu_lower_32_bits(info.mc_addr);
577 cz_smu->driver_buffer[i].data_size = info.image_size;
579 cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
580 cz_smu->driver_buffer_length++;
587 static int cz_smu_populate_single_scratch_entry(
588 struct pp_smumgr *smumgr,
589 enum cz_scratch_entry scratch_type,
590 uint32_t ulsize_byte,
591 struct cz_buffer_entry *entry)
593 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
595 ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
596 | cz_smu->smu_buffer.mc_addr_low;
598 uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
600 mc_addr += cz_smu->smu_buffer_used_bytes;
602 entry->data_size = ulsize_byte;
603 entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
604 cz_smu->smu_buffer_used_bytes;
605 entry->mc_addr_low = smu_lower_32_bits(mc_addr);
606 entry->mc_addr_high = smu_upper_32_bits(mc_addr);
607 entry->firmware_ID = scratch_type;
609 cz_smu->smu_buffer_used_bytes += ulsize_aligned;
614 static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
616 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
619 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
620 if (cz_smu->scratch_buffer[i].firmware_ID
621 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
625 *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
627 cz_send_msg_to_smc_with_parameter(smumgr,
628 PPSMC_MSG_SetClkTableAddrHi,
629 cz_smu->scratch_buffer[i].mc_addr_high);
631 cz_send_msg_to_smc_with_parameter(smumgr,
632 PPSMC_MSG_SetClkTableAddrLo,
633 cz_smu->scratch_buffer[i].mc_addr_low);
635 cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
636 cz_smu->toc_entry_clock_table);
638 cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram);
643 static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
645 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
648 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
649 if (cz_smu->scratch_buffer[i].firmware_ID
650 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
654 cz_send_msg_to_smc_with_parameter(smumgr,
655 PPSMC_MSG_SetClkTableAddrHi,
656 cz_smu->scratch_buffer[i].mc_addr_high);
658 cz_send_msg_to_smc_with_parameter(smumgr,
659 PPSMC_MSG_SetClkTableAddrLo,
660 cz_smu->scratch_buffer[i].mc_addr_low);
662 cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
663 cz_smu->toc_entry_clock_table);
665 cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu);
670 static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
672 struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
673 uint32_t smc_address;
675 if (!smumgr->reload_fw) {
676 pr_info("skip reloading...\n");
680 cz_smu_populate_firmware_entries(smumgr);
682 cz_smu_construct_toc(smumgr);
684 smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
685 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
687 cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);
689 cz_send_msg_to_smc_with_parameter(smumgr,
690 PPSMC_MSG_DriverDramAddrHi,
691 cz_smu->toc_buffer.mc_addr_high);
693 cz_send_msg_to_smc_with_parameter(smumgr,
694 PPSMC_MSG_DriverDramAddrLo,
695 cz_smu->toc_buffer.mc_addr_low);
697 cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);
699 cz_send_msg_to_smc_with_parameter(smumgr,
700 PPSMC_MSG_ExecuteJob,
701 cz_smu->toc_entry_aram);
702 cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
703 cz_smu->toc_entry_power_profiling_index);
705 return cz_send_msg_to_smc_with_parameter(smumgr,
706 PPSMC_MSG_ExecuteJob,
707 cz_smu->toc_entry_initialize_index);
710 static int cz_start_smu(struct pp_smumgr *smumgr)
713 uint32_t fw_to_check = 0;
715 fw_to_check = UCODE_ID_RLC_G_MASK |
716 UCODE_ID_SDMA0_MASK |
717 UCODE_ID_SDMA1_MASK |
718 UCODE_ID_CP_CE_MASK |
719 UCODE_ID_CP_ME_MASK |
720 UCODE_ID_CP_PFP_MASK |
721 UCODE_ID_CP_MEC_JT1_MASK |
722 UCODE_ID_CP_MEC_JT2_MASK;
724 if (smumgr->chip_id == CHIP_STONEY)
725 fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
727 ret = cz_request_smu_load_fw(smumgr);
729 pr_err("SMU firmware load failed\n");
731 cz_check_fw_load_finish(smumgr, fw_to_check);
733 ret = cz_load_mec_firmware(smumgr);
735 pr_err("Mec Firmware load failed\n");
740 static int cz_smu_init(struct pp_smumgr *smumgr)
742 uint64_t mc_addr = 0;
744 struct cz_smumgr *cz_smu;
746 cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
750 smumgr->backend = cz_smu;
752 cz_smu->toc_buffer.data_size = 4096;
753 cz_smu->smu_buffer.data_size =
754 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
755 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
756 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
757 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
758 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
760 ret = smu_allocate_memory(smumgr->device,
761 cz_smu->toc_buffer.data_size,
762 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
765 &cz_smu->toc_buffer.kaddr,
766 &cz_smu->toc_buffer.handle);
770 cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
771 cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
773 ret = smu_allocate_memory(smumgr->device,
774 cz_smu->smu_buffer.data_size,
775 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
778 &cz_smu->smu_buffer.kaddr,
779 &cz_smu->smu_buffer.handle);
783 cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
784 cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
786 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
787 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
788 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
789 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
790 pr_err("Error when Populate Firmware Entry.\n");
794 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
795 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
796 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
797 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
798 pr_err("Error when Populate Firmware Entry.\n");
801 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
802 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
803 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
804 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
805 pr_err("Error when Populate Firmware Entry.\n");
809 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
810 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
811 sizeof(struct SMU8_MultimediaPowerLogData),
812 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
813 pr_err("Error when Populate Firmware Entry.\n");
817 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
818 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
819 sizeof(struct SMU8_Fusion_ClkTable),
820 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
821 pr_err("Error when Populate Firmware Entry.\n");
828 static int cz_smu_fini(struct pp_smumgr *smumgr)
830 struct cz_smumgr *cz_smu;
832 if (smumgr == NULL || smumgr->device == NULL)
835 cz_smu = (struct cz_smumgr *)smumgr->backend;
837 cgs_free_gpu_mem(smumgr->device,
838 cz_smu->toc_buffer.handle);
839 cgs_free_gpu_mem(smumgr->device,
840 cz_smu->smu_buffer.handle);
/*
 * Carrizo/Stoney SMU manager vtable. request_smu_load_fw and
 * request_smu_load_specific_fw are intentionally NULL; firmware loading
 * is driven from start_smu instead.
 * NOTE(review): the struct's closing "};" lies beyond this view.
 */
const struct pp_smumgr_func cz_smu_funcs = {
	.smu_init = cz_smu_init,
	.smu_fini = cz_smu_fini,
	.start_smu = cz_start_smu,
	.check_fw_load_finish = cz_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.get_argument = cz_smum_get_argument,
	.send_msg_to_smc = cz_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
	.download_pptable_settings = cz_download_pptable_settings,
	.upload_pptable_settings = cz_upload_pptable_settings,