GNU Linux-libre 4.19.286-gnu1: drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include "atom-types.h"
28 #include "atombios.h"
29 #include "processpptables.h"
30 #include "cgs_common.h"
31 #include "smu/smu_8_0_d.h"
32 #include "smu8_fusion.h"
33 #include "smu/smu_8_0_sh_mask.h"
34 #include "smumgr.h"
35 #include "hwmgr.h"
36 #include "hardwaremanager.h"
37 #include "cz_ppsmc.h"
38 #include "smu8_hwmgr.h"
39 #include "power_state.h"
40 #include "pp_thermal.h"
41
42 #define ixSMUSVI_NB_CURRENTVID 0xD8230044
43 #define CURRENT_NB_VID_MASK 0xff000000
44 #define CURRENT_NB_VID__SHIFT 24
45 #define ixSMUSVI_GFX_CURRENTVID  0xD8230048
46 #define CURRENT_GFX_VID_MASK 0xff000000
47 #define CURRENT_GFX_VID__SHIFT 24
48
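/*
 * smu8_magic tags pp_hw_power_state structures owned by this backend.
 * The cast helpers below return NULL when the magic does not match, so
 * callers never reinterpret a power state created by another hwmgr.
 */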
49 static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
50
51 static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
52 {
53         if (smu8_magic != hw_ps->magic)
54                 return NULL;
55
56         return (struct smu8_power_state *)hw_ps;
57 }
58
59 static const struct smu8_power_state *cast_const_smu8_power_state(
60                                 const struct pp_hw_power_state *hw_ps)
61 {
62         if (smu8_magic != hw_ps->magic)
63                 return NULL;
64
65         return (struct smu8_power_state *)hw_ps;
66 }
67
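/*
 * Map a clock request to a DPM level index for the given PPSMC message.
 * For the *SoftMin/*HardMin messages the dependency table is walked
 * upwards and the first level whose clock satisfies the request is
 * returned; for the *SoftMax/*HardMax messages it is walked downwards
 * and the highest level not exceeding the request is returned.  The
 * same pattern is used by smu8_get_sclk_level() and
 * smu8_get_uvd_level() below.
 */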
68 static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
69                                         uint32_t clock, uint32_t msg)
70 {
71         int i = 0;
72         struct phm_vce_clock_voltage_dependency_table *ptable =
73                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
74
75         switch (msg) {
76         case PPSMC_MSG_SetEclkSoftMin:
77         case PPSMC_MSG_SetEclkHardMin:
78                 for (i = 0; i < (int)ptable->count; i++) {
79                         if (clock <= ptable->entries[i].ecclk)
80                                 break;
81                 }
82                 break;
83
84         case PPSMC_MSG_SetEclkSoftMax:
85         case PPSMC_MSG_SetEclkHardMax:
86                 for (i = ptable->count - 1; i >= 0; i--) {
87                         if (clock >= ptable->entries[i].ecclk)
88                                 break;
89                 }
90                 break;
91
92         default:
93                 break;
94         }
95
96         return i;
97 }
98
99 static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
100                                 uint32_t clock, uint32_t msg)
101 {
102         int i = 0;
103         struct phm_clock_voltage_dependency_table *table =
104                                 hwmgr->dyn_state.vddc_dependency_on_sclk;
105
106         switch (msg) {
107         case PPSMC_MSG_SetSclkSoftMin:
108         case PPSMC_MSG_SetSclkHardMin:
109                 for (i = 0; i < (int)table->count; i++) {
110                         if (clock <= table->entries[i].clk)
111                                 break;
112                 }
113                 break;
114
115         case PPSMC_MSG_SetSclkSoftMax:
116         case PPSMC_MSG_SetSclkHardMax:
117                 for (i = table->count - 1; i >= 0; i--) {
118                         if (clock >= table->entries[i].clk)
119                                 break;
120                 }
121                 break;
122
123         default:
124                 break;
125         }
126         return i;
127 }
128
129 static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
130                                         uint32_t clock, uint32_t msg)
131 {
132         int i = 0;
133         struct phm_uvd_clock_voltage_dependency_table *ptable =
134                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
135
136         switch (msg) {
137         case PPSMC_MSG_SetUvdSoftMin:
138         case PPSMC_MSG_SetUvdHardMin:
139                 for (i = 0; i < (int)ptable->count; i++) {
140                         if (clock <= ptable->entries[i].vclk)
141                                 break;
142                 }
143                 break;
144
145         case PPSMC_MSG_SetUvdSoftMax:
146         case PPSMC_MSG_SetUvdHardMax:
147                 for (i = ptable->count - 1; i >= 0; i--) {
148                         if (clock >= ptable->entries[i].vclk)
149                                 break;
150                 }
151                 break;
152
153         default:
154                 break;
155         }
156
157         return i;
158 }
159
160 static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
161 {
162         struct smu8_hwmgr *data = hwmgr->backend;
163
164         if (data->max_sclk_level == 0) {
165                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
166                 data->max_sclk_level = smum_get_argument(hwmgr) + 1;
167         }
168
169         return data->max_sclk_level;
170 }
171
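/*
 * Seed the backend with its power-management defaults: ramp step and
 * delay, clock-slow-down and thermal thresholds, voting rights, and the
 * platform capability bits.  The UVD/VCE power-gating caps are only set
 * when the corresponding AMD_PG_SUPPORT_* flags are present in
 * adev->pg_flags.
 */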
172 static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
173 {
174         struct smu8_hwmgr *data = hwmgr->backend;
175         struct amdgpu_device *adev = hwmgr->adev;
176
177         data->gfx_ramp_step = 256*25/100;
178         data->gfx_ramp_delay = 1; /* by default, we delay 1us */
179
180         data->mgcg_cgtt_local0 = 0x00000000;
181         data->mgcg_cgtt_local1 = 0x00000000;
182         data->clock_slow_down_freq = 25000;
183         data->skip_clock_slow_down = 1;
184         data->enable_nb_ps_policy = 1; /* enabled; was to stay disabled until UNB was ready */
185         data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
186         data->voting_rights_clients = 0x00C00033;
187         data->static_screen_threshold = 8;
188         data->ddi_power_gating_disabled = 0;
189         data->bapm_enabled = 1;
190         data->voltage_drop_threshold = 0;
191         data->gfx_power_gating_threshold = 500;
192         data->vce_slow_sclk_threshold = 20000;
193         data->dce_slow_sclk_threshold = 30000;
194         data->disable_driver_thermal_policy = 1;
195         data->disable_nb_ps3_in_battery = 0;
196
197         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
198                                                         PHM_PlatformCaps_ABM);
199
200         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
201                                     PHM_PlatformCaps_NonABMSupportInPPLib);
202
203         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
204                                         PHM_PlatformCaps_DynamicM3Arbiter);
205
206         data->override_dynamic_mgpg = 1;
207
208         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
209                                   PHM_PlatformCaps_DynamicPatchPowerState);
210
211         data->thermal_auto_throttling_treshold = 0;
212         data->tdr_clock = 0;
213         data->disable_gfx_power_gating_in_uvd = 0;
214
215         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
216                                         PHM_PlatformCaps_DynamicUVDState);
217
218         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
219                         PHM_PlatformCaps_UVDDPM);
220         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
221                         PHM_PlatformCaps_VCEDPM);
222
223         data->cc6_settings.cpu_cc6_disable = false;
224         data->cc6_settings.cpu_pstate_disable = false;
225         data->cc6_settings.nb_pstate_switch_disable = false;
226         data->cc6_settings.cpu_pstate_separation_time = 0;
227
228         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
229                                    PHM_PlatformCaps_DisableVoltageIsland);
230
231         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232                       PHM_PlatformCaps_UVDPowerGating);
233         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
234                       PHM_PlatformCaps_VCEPowerGating);
235
236         if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
237                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
238                               PHM_PlatformCaps_UVDPowerGating);
239         if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
240                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
241                               PHM_PlatformCaps_VCEPowerGating);
242
243
244         return 0;
245 }
246
247 /* convert from an 8-bit VID to the real voltage in mV*4 units (6200 corresponds to 1.55 V; each VID step is 6.25 mV) */
248 static uint32_t smu8_convert_8Bit_index_to_voltage(
249                         struct pp_hwmgr *hwmgr, uint16_t voltage)
250 {
251         return 6200 - (voltage * 25);
252 }
253
254 static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
255                         struct phm_clock_and_voltage_limits *table)
256 {
257         struct smu8_hwmgr *data = hwmgr->backend;
258         struct smu8_sys_info *sys_info = &data->sys_info;
259         struct phm_clock_voltage_dependency_table *dep_table =
260                                 hwmgr->dyn_state.vddc_dependency_on_sclk;
261
262         if (dep_table->count > 0) {
263                 table->sclk = dep_table->entries[dep_table->count-1].clk;
264                 table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
265                    (uint16_t)dep_table->entries[dep_table->count-1].v);
266         }
267         table->mclk = sys_info->nbp_memory_clock[0];
268         return 0;
269 }
270
271 static int smu8_init_dynamic_state_adjustment_rule_settings(
272                         struct pp_hwmgr *hwmgr,
273                         ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
274 {
275         uint32_t table_size =
276                 sizeof(struct phm_clock_voltage_dependency_table) +
277                 (7 * sizeof(struct phm_clock_voltage_dependency_record));
278
279         struct phm_clock_voltage_dependency_table *table_clk_vlt =
280                                         kzalloc(table_size, GFP_KERNEL);
281
282         if (NULL == table_clk_vlt) {
283                 pr_err("Can not allocate memory!\n");
284                 return -ENOMEM;
285         }
286
287         table_clk_vlt->count = 8;
288         table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
289         table_clk_vlt->entries[0].v = 0;
290         table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
291         table_clk_vlt->entries[1].v = 1;
292         table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
293         table_clk_vlt->entries[2].v = 2;
294         table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
295         table_clk_vlt->entries[3].v = 3;
296         table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
297         table_clk_vlt->entries[4].v = 4;
298         table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
299         table_clk_vlt->entries[5].v = 5;
300         table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
301         table_clk_vlt->entries[6].v = 6;
302         table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
303         table_clk_vlt->entries[7].v = 7;
304         hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
305
306         return 0;
307 }
308
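/*
 * Pull ATOM_INTEGRATED_SYSTEM_INFO_V1_9 out of the VBIOS and cache the
 * fields this driver needs: boot clocks, HTC limits (with 5/203 used as
 * defaults when the table reports zero), NB P-state clocks and voltage
 * indices, and the per-level display clocks.  When NB DPM is disabled,
 * every NB P-state is collapsed to the state-0 values.  The max power
 * limits and the DAL adjustment table are derived at the end.
 */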
309 static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
310 {
311         struct smu8_hwmgr *data = hwmgr->backend;
312         ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
313         uint32_t i;
314         int result = 0;
315         uint8_t frev, crev;
316         uint16_t size;
317
318         info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
319                         GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
320                         &size, &frev, &crev);
321
322         if (info == NULL) {
323                 pr_err("Could not retrieve the Integrated System Info Table!\n");
324                 return -EINVAL;
325         }
326
327         if (crev != 9) {
328                 pr_err("Unsupported IGP table: %d %d\n", frev, crev);
329                 return -EINVAL;
330         }
331
332         data->sys_info.bootup_uma_clock =
333                                    le32_to_cpu(info->ulBootUpUMAClock);
334
335         data->sys_info.bootup_engine_clock =
336                                 le32_to_cpu(info->ulBootUpEngineClock);
337
338         data->sys_info.dentist_vco_freq =
339                                    le32_to_cpu(info->ulDentistVCOFreq);
340
341         data->sys_info.system_config =
342                                      le32_to_cpu(info->ulSystemConfig);
343
344         data->sys_info.bootup_nb_voltage_index =
345                                   le16_to_cpu(info->usBootUpNBVoltage);
346
347         data->sys_info.htc_hyst_lmt =
348                         (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
349
350         data->sys_info.htc_tmp_lmt =
351                         (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
352
353         if (data->sys_info.htc_tmp_lmt <=
354                         data->sys_info.htc_hyst_lmt) {
355                 pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
356                 return -EINVAL;
357         }
358
359         data->sys_info.nb_dpm_enable =
360                                 data->enable_nb_ps_policy &&
361                                 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
362
363         for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
364                 if (i < SMU8_NUM_NBPMEMORYCLOCK) {
365                         data->sys_info.nbp_memory_clock[i] =
366                           le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
367                 }
368                 data->sys_info.nbp_n_clock[i] =
369                             le32_to_cpu(info->ulNbpStateNClkFreq[i]);
370         }
371
372         for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
373                 data->sys_info.display_clock[i] =
374                                         le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
375         }
376
377         /* Only 4 NB P-state levels are used here; make sure we do not exceed that */
378         for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
379                 data->sys_info.nbp_voltage_index[i] =
380                              le16_to_cpu(info->usNBPStateVoltage[i]);
381         }
382
383         if (!data->sys_info.nb_dpm_enable) {
384                 for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
385                         if (i < SMU8_NUM_NBPMEMORYCLOCK) {
386                                 data->sys_info.nbp_memory_clock[i] =
387                                     data->sys_info.nbp_memory_clock[0];
388                         }
389                         data->sys_info.nbp_n_clock[i] =
390                                     data->sys_info.nbp_n_clock[0];
391                         data->sys_info.nbp_voltage_index[i] =
392                                     data->sys_info.nbp_voltage_index[0];
393                 }
394         }
395
396         if (le32_to_cpu(info->ulGPUCapInfo) &
397                 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
398                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
399                                     PHM_PlatformCaps_EnableDFSBypass);
400         }
401
402         data->sys_info.uma_channel_number = info->ucUMAChannelNumber;
403
404         smu8_construct_max_power_limits_table(hwmgr,
405                                     &hwmgr->dyn_state.max_clock_voltage_on_ac);
406
407         smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
408                                     &info->sDISPCLK_Voltage[0]);
409
410         return result;
411 }
412
413 static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
414 {
415         struct smu8_hwmgr *data = hwmgr->backend;
416
417         data->boot_power_level.engineClock =
418                                 data->sys_info.bootup_engine_clock;
419
420         data->boot_power_level.vddcIndex =
421                         (uint8_t)data->sys_info.bootup_nb_voltage_index;
422
423         data->boot_power_level.dsDividerIndex = 0;
424         data->boot_power_level.ssDividerIndex = 0;
425         data->boot_power_level.allowGnbSlow = 1;
426         data->boot_power_level.forceNBPstate = 0;
427         data->boot_power_level.hysteresis_up = 0;
428         data->boot_power_level.numSIMDToPowerDown = 0;
429         data->boot_power_level.display_wm = 0;
430         data->boot_power_level.vce_wm = 0;
431
432         return 0;
433 }
434
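/*
 * Patch the SMU's clock table and hand it back.  The table is first
 * downloaded from the SMU, then each breakdown table (Sclk, Aclk, Vclk,
 * Dclk, Eclk) is filled from the matching dependency table: voltage
 * index, frequency, and the DFS divider obtained from
 * atomctrl_get_engine_pll_dividers_kong().  Nothing is done unless
 * hwmgr->need_pp_table_upload is set.
 */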
435 static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
436 {
437         struct SMU8_Fusion_ClkTable *clock_table;
438         int ret;
439         uint32_t i;
440         void *table = NULL;
441         pp_atomctrl_clock_dividers_kong dividers;
442
443         struct phm_clock_voltage_dependency_table *vddc_table =
444                 hwmgr->dyn_state.vddc_dependency_on_sclk;
445         struct phm_clock_voltage_dependency_table *vdd_gfx_table =
446                 hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
447         struct phm_acp_clock_voltage_dependency_table *acp_table =
448                 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
449         struct phm_uvd_clock_voltage_dependency_table *uvd_table =
450                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
451         struct phm_vce_clock_voltage_dependency_table *vce_table =
452                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
453
454         if (!hwmgr->need_pp_table_upload)
455                 return 0;
456
457         ret = smum_download_powerplay_table(hwmgr, &table);
458
459         PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
460                             "Fail to get clock table from SMU!", return -EINVAL;);
461
462         clock_table = (struct SMU8_Fusion_ClkTable *)table;
463
464         /* patch clock table */
465         PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
466                             "Dependency table entry exceeds max limit!", return -EINVAL;);
467         PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
468                             "Dependency table entry exceeds max limit!", return -EINVAL;);
469         PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
470                             "Dependency table entry exceeds max limit!", return -EINVAL;);
471         PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
472                             "Dependency table entry exceeds max limit!", return -EINVAL;);
473         PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
474                             "Dependency table entry exceeds max limit!", return -EINVAL;);
475
476         for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {
477
478                 /* vddc_sclk */
479                 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
480                         (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
481                 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
482                         (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
483
484                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
485                                                       clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
486                                                       &dividers);
487
488                 clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
489                         (uint8_t)dividers.pll_post_divider;
490
491                 /* vddgfx_sclk */
492                 clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
493                         (i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;
494
495                 /* acp breakdown */
496                 clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
497                         (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
498                 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
499                         (i < acp_table->count) ? acp_table->entries[i].acpclk : 0;
500
501                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
502                                                       clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
503                                                       &dividers);
504
505                 clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
506                         (uint8_t)dividers.pll_post_divider;
507
508
509                 /* uvd breakdown */
510                 clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
511                         (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
512                 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
513                         (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
514
515                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
516                                                       clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
517                                                       &dividers);
518
519                 clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
520                         (uint8_t)dividers.pll_post_divider;
521
522                 clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
523                         (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
524                 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
525                         (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
526
527                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
528                                                       clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
529                                                       &dividers);
530
531                 clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
532                         (uint8_t)dividers.pll_post_divider;
533
534                 /* vce breakdown */
535                 clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
536                         (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
537                 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
538                         (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
539
540
541                 atomctrl_get_engine_pll_dividers_kong(hwmgr,
542                                                       clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
543                                                       &dividers);
544
545                 clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
546                         (uint8_t)dividers.pll_post_divider;
547
548         }
549         ret = smum_upload_powerplay_table(hwmgr);
550
551         return ret;
552 }
553
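/*
 * The smu8_init_*_limit() helpers below establish the soft/hard clock
 * windows for SCLK, UVD, VCE and ACP: the minimum comes from the first
 * dependency-table entry (or zero), and the maximum is taken from the
 * level the SMU reports via the GetMax*Level messages, clamped to the
 * last table entry.
 */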
554 static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
555 {
556         struct smu8_hwmgr *data = hwmgr->backend;
557         struct phm_clock_voltage_dependency_table *table =
558                                         hwmgr->dyn_state.vddc_dependency_on_sclk;
559         unsigned long clock = 0, level;
560
561         if (NULL == table || table->count <= 0)
562                 return -EINVAL;
563
564         data->sclk_dpm.soft_min_clk = table->entries[0].clk;
565         data->sclk_dpm.hard_min_clk = table->entries[0].clk;
566
567         level = smu8_get_max_sclk_level(hwmgr) - 1;
568
569         if (level < table->count)
570                 clock = table->entries[level].clk;
571         else
572                 clock = table->entries[table->count - 1].clk;
573
574         data->sclk_dpm.soft_max_clk = clock;
575         data->sclk_dpm.hard_max_clk = clock;
576
577         return 0;
578 }
579
580 static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
581 {
582         struct smu8_hwmgr *data = hwmgr->backend;
583         struct phm_uvd_clock_voltage_dependency_table *table =
584                                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
585         unsigned long clock = 0, level;
586
587         if (NULL == table || table->count <= 0)
588                 return -EINVAL;
589
590         data->uvd_dpm.soft_min_clk = 0;
591         data->uvd_dpm.hard_min_clk = 0;
592
593         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
594         level = smum_get_argument(hwmgr);
595
596         if (level < table->count)
597                 clock = table->entries[level].vclk;
598         else
599                 clock = table->entries[table->count - 1].vclk;
600
601         data->uvd_dpm.soft_max_clk = clock;
602         data->uvd_dpm.hard_max_clk = clock;
603
604         return 0;
605 }
606
607 static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
608 {
609         struct smu8_hwmgr *data = hwmgr->backend;
610         struct phm_vce_clock_voltage_dependency_table *table =
611                                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
612         unsigned long clock = 0, level;
613
614         if (NULL == table || table->count <= 0)
615                 return -EINVAL;
616
617         data->vce_dpm.soft_min_clk = 0;
618         data->vce_dpm.hard_min_clk = 0;
619
620         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
621         level = smum_get_argument(hwmgr);
622
623         if (level < table->count)
624                 clock = table->entries[level].ecclk;
625         else
626                 clock = table->entries[table->count - 1].ecclk;
627
628         data->vce_dpm.soft_max_clk = clock;
629         data->vce_dpm.hard_max_clk = clock;
630
631         return 0;
632 }
633
634 static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
635 {
636         struct smu8_hwmgr *data = hwmgr->backend;
637         struct phm_acp_clock_voltage_dependency_table *table =
638                                 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
639         unsigned long clock = 0, level;
640
641         if (NULL == table || table->count <= 0)
642                 return -EINVAL;
643
644         data->acp_dpm.soft_min_clk = 0;
645         data->acp_dpm.hard_min_clk = 0;
646
647         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
648         level = smum_get_argument(hwmgr);
649
650         if (level < table->count)
651                 clock = table->entries[level].acpclk;
652         else
653                 clock = table->entries[table->count - 1].acpclk;
654
655         data->acp_dpm.soft_max_clk = clock;
656         data->acp_dpm.hard_max_clk = clock;
657         return 0;
658 }
659
660 static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
661 {
662         struct smu8_hwmgr *data = hwmgr->backend;
663
664         data->uvd_power_gated = false;
665         data->vce_power_gated = false;
666         data->samu_power_gated = false;
667         data->acp_power_gated = false;
668         data->pgacpinit = true;
669 }
670
671 static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
672 {
673         struct smu8_hwmgr *data = hwmgr->backend;
674
675         data->low_sclk_interrupt_threshold = 0;
676 }
677
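/*
 * Recompute the SCLK limits for the current display configuration: the
 * hard minimum tracks min_core_set_clock, and when the Stable P-State
 * cap is enabled the soft minimum (and maximum) are floored at 75% of
 * the AC maximum as computed below.  Each change is pushed to the SMU
 * through the SetSclk* messages.
 */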
678 static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
679 {
680         struct smu8_hwmgr *data = hwmgr->backend;
681         struct phm_clock_voltage_dependency_table *table =
682                                         hwmgr->dyn_state.vddc_dependency_on_sclk;
683
684         unsigned long clock = 0;
685         unsigned long level;
686         unsigned long stable_pstate_sclk;
687         unsigned long percentage;
688
689         data->sclk_dpm.soft_min_clk = table->entries[0].clk;
690         level = smu8_get_max_sclk_level(hwmgr) - 1;
691
692         if (level < table->count)
693                 data->sclk_dpm.soft_max_clk  = table->entries[level].clk;
694         else
695                 data->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
696
697         clock = hwmgr->display_config->min_core_set_clock;
698         if (clock == 0)
699                 pr_debug("min_core_set_clock not set\n");
700
701         if (data->sclk_dpm.hard_min_clk != clock) {
702                 data->sclk_dpm.hard_min_clk = clock;
703
704                 smum_send_msg_to_smc_with_parameter(hwmgr,
705                                                 PPSMC_MSG_SetSclkHardMin,
706                                                  smu8_get_sclk_level(hwmgr,
707                                         data->sclk_dpm.hard_min_clk,
708                                              PPSMC_MSG_SetSclkHardMin));
709         }
710
711         clock = data->sclk_dpm.soft_min_clk;
712
713         /* update minimum clocks for Stable P-State feature */
714         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
715                                      PHM_PlatformCaps_StablePState)) {
716                 percentage = 75;
717                 /* Sclk: calculate the sclk value based on the percentage and find the FLOOR sclk from the VddcDependencyOnSCLK table */
718                 stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
719                                         percentage) / 100;
720
721                 if (clock < stable_pstate_sclk)
722                         clock = stable_pstate_sclk;
723         }
724
725         if (data->sclk_dpm.soft_min_clk != clock) {
726                 data->sclk_dpm.soft_min_clk = clock;
727                 smum_send_msg_to_smc_with_parameter(hwmgr,
728                                                 PPSMC_MSG_SetSclkSoftMin,
729                                                 smu8_get_sclk_level(hwmgr,
730                                         data->sclk_dpm.soft_min_clk,
731                                              PPSMC_MSG_SetSclkSoftMin));
732         }
733
734         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
735                                     PHM_PlatformCaps_StablePState) &&
736                          data->sclk_dpm.soft_max_clk != clock) {
737                 data->sclk_dpm.soft_max_clk = clock;
738                 smum_send_msg_to_smc_with_parameter(hwmgr,
739                                                 PPSMC_MSG_SetSclkSoftMax,
740                                                 smu8_get_sclk_level(hwmgr,
741                                         data->sclk_dpm.soft_max_clk,
742                                         PPSMC_MSG_SetSclkSoftMax));
743         }
744
745         return 0;
746 }
747
748 static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
749 {
750         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
751                                 PHM_PlatformCaps_SclkDeepSleep)) {
752                 uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
753                 if (clks == 0)
754                         clks = SMU8_MIN_DEEP_SLEEP_SCLK;
755
756                 PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
757
758                 smum_send_msg_to_smc_with_parameter(hwmgr,
759                                 PPSMC_MSG_SetMinDeepSleepSclk,
760                                 clks);
761         }
762
763         return 0;
764 }
765
766 static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
767 {
768         struct smu8_hwmgr *data =
769                                   hwmgr->backend;
770
771         smum_send_msg_to_smc_with_parameter(hwmgr,
772                                         PPSMC_MSG_SetWatermarkFrequency,
773                                         data->sclk_dpm.soft_max_clk);
774
775         return 0;
776 }
777
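/*
 * Toggle the low-memory (NB) P-state when NB DPM is active.  The 'lock'
 * flag is forwarded as the PPSMC message argument; 'enable' selects
 * between the EnableLowMemoryPstate and DisableLowMemoryPstate messages.
 */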
778 static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
779 {
780         struct smu8_hwmgr *hw_data = hwmgr->backend;
781
782         if (hw_data->is_nb_dpm_enabled) {
783                 if (enable) {
784                         PP_DBG_LOG("enable Low Memory PState.\n");
785
786                         return smum_send_msg_to_smc_with_parameter(hwmgr,
787                                                 PPSMC_MSG_EnableLowMemoryPstate,
788                                                 (lock ? 1 : 0));
789                 } else {
790                         PP_DBG_LOG("disable Low Memory PState.\n");
791
792                         return smum_send_msg_to_smc_with_parameter(hwmgr,
793                                                 PPSMC_MSG_DisableLowMemoryPstate,
794                                                 (lock ? 1 : 0));
795                 }
796         }
797
798         return 0;
799 }
800
801 static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
802 {
803         int ret = 0;
804
805         struct smu8_hwmgr *data = hwmgr->backend;
806         unsigned long dpm_features = 0;
807
808         if (data->is_nb_dpm_enabled) {
809                 smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
810                 dpm_features |= NB_DPM_MASK;
811                 ret = smum_send_msg_to_smc_with_parameter(
812                                                           hwmgr,
813                                                           PPSMC_MSG_DisableAllSmuFeatures,
814                                                           dpm_features);
815                 if (ret == 0)
816                         data->is_nb_dpm_enabled = false;
817         }
818
819         return ret;
820 }
821
822 static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
823 {
824         int ret = 0;
825
826         struct smu8_hwmgr *data = hwmgr->backend;
827         unsigned long dpm_features = 0;
828
829         if (!data->is_nb_dpm_enabled) {
830                 PP_DBG_LOG("enabling ALL SMU features.\n");
831                 dpm_features |= NB_DPM_MASK;
832                 ret = smum_send_msg_to_smc_with_parameter(
833                                                           hwmgr,
834                                                           PPSMC_MSG_EnableAllSmuFeatures,
835                                                           dpm_features);
836                 if (ret == 0)
837                         data->is_nb_dpm_enabled = true;
838         }
839
840         return ret;
841 }
842
843 static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
844 {
845         bool disable_switch;
846         bool enable_low_mem_state;
847         struct smu8_hwmgr *hw_data = hwmgr->backend;
848         const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
849         const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
850
851         if (hw_data->sys_info.nb_dpm_enable) {
852                 disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
853                 enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
854
855                 if (pnew_state->action == FORCE_HIGH)
856                         smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
857                 else if (pnew_state->action == CANCEL_FORCE_HIGH)
858                         smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
859                 else
860                         smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
861         }
862         return 0;
863 }
864
865 static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
866 {
867         int ret = 0;
868
869         smu8_update_sclk_limit(hwmgr);
870         smu8_set_deep_sleep_sclk_threshold(hwmgr);
871         smu8_set_watermark_threshold(hwmgr);
872         ret = smu8_enable_nb_dpm(hwmgr);
873         if (ret)
874                 return ret;
875         smu8_update_low_mem_pstate(hwmgr, input);
876
877         return 0;
878 };
879
880
881 static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
882 {
883         int ret;
884
885         ret = smu8_upload_pptable_to_smu(hwmgr);
886         if (ret)
887                 return ret;
888         ret = smu8_init_sclk_limit(hwmgr);
889         if (ret)
890                 return ret;
891         ret = smu8_init_uvd_limit(hwmgr);
892         if (ret)
893                 return ret;
894         ret = smu8_init_vce_limit(hwmgr);
895         if (ret)
896                 return ret;
897         ret = smu8_init_acp_limit(hwmgr);
898         if (ret)
899                 return ret;
900
901         smu8_init_power_gate_state(hwmgr);
902         smu8_init_sclk_threshold(hwmgr);
903
904         return 0;
905 }
906
907 static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
908 {
909         struct smu8_hwmgr *hw_data = hwmgr->backend;
910
911         hw_data->disp_clk_bypass_pending = false;
912         hw_data->disp_clk_bypass = false;
913 }
914
915 static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
916 {
917         struct smu8_hwmgr *hw_data = hwmgr->backend;
918
919         hw_data->is_nb_dpm_enabled = false;
920 }
921
922 static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
923 {
924         struct smu8_hwmgr *hw_data = hwmgr->backend;
925
926         hw_data->cc6_settings.cc6_setting_changed = false;
927         hw_data->cc6_settings.cpu_pstate_separation_time = 0;
928         hw_data->cc6_settings.cpu_cc6_disable = false;
929         hw_data->cc6_settings.cpu_pstate_disable = false;
930 }
931
932 static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
933 {
934         smu8_power_up_display_clock_sys_pll(hwmgr);
935         smu8_clear_nb_dpm_flag(hwmgr);
936         smu8_reset_cc6_data(hwmgr);
937         return 0;
938 };
939
940 static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
941 {
942         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
943                                 ixCG_FREQ_TRAN_VOTING_0,
944                                 SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
945 }
946
947 static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
948 {
949         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
950                                 ixCG_FREQ_TRAN_VOTING_0, 0);
951 }
952
953 static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
954 {
955         struct smu8_hwmgr *data = hwmgr->backend;
956
957         data->dpm_flags |= DPMFlags_SCLK_Enabled;
958
959         return smum_send_msg_to_smc_with_parameter(hwmgr,
960                                 PPSMC_MSG_EnableAllSmuFeatures,
961                                 SCLK_DPM_MASK);
962 }
963
964 static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
965 {
966         int ret = 0;
967         struct smu8_hwmgr *data = hwmgr->backend;
968         unsigned long dpm_features = 0;
969
970         if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
971                 dpm_features |= SCLK_DPM_MASK;
972                 data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
973                 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
974                                         PPSMC_MSG_DisableAllSmuFeatures,
975                                         dpm_features);
976         }
977         return ret;
978 }
979
980 static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
981 {
982         struct smu8_hwmgr *data = hwmgr->backend;
983
984         data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
985         data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;
986
987         smum_send_msg_to_smc_with_parameter(hwmgr,
988                                 PPSMC_MSG_SetSclkSoftMin,
989                                 smu8_get_sclk_level(hwmgr,
990                                 data->sclk_dpm.soft_min_clk,
991                                 PPSMC_MSG_SetSclkSoftMin));
992
993         smum_send_msg_to_smc_with_parameter(hwmgr,
994                                 PPSMC_MSG_SetSclkSoftMax,
995                                 smu8_get_sclk_level(hwmgr,
996                                 data->sclk_dpm.soft_max_clk,
997                                 PPSMC_MSG_SetSclkSoftMax));
998
999         return 0;
1000 }
1001
1002 static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
1003 {
1004         struct smu8_hwmgr *data = hwmgr->backend;
1005
1006         data->acp_boot_level = 0xff;
1007 }
1008
1009 static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1010 {
1011         smu8_disable_nb_dpm(hwmgr);
1012
1013         smu8_clear_voting_clients(hwmgr);
1014         if (smu8_stop_dpm(hwmgr))
1015                 return -EINVAL;
1016
1017         return 0;
1018 };
1019
1020 static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1021 {
1022         smu8_program_voting_clients(hwmgr);
1023         if (smu8_start_dpm(hwmgr))
1024                 return -EINVAL;
1025         smu8_program_bootup_state(hwmgr);
1026         smu8_reset_acp_boot_level(hwmgr);
1027
1028         return 0;
1029 };
1030
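/*
 * Decide the NB P-state action for the requested power state.  The
 * required memory clock is taken from the display configuration (or the
 * AC maximum under Stable P-State); if it exceeds the highest NBP
 * memory clock, or three or more displays are active, the high state is
 * forced, otherwise a previous force is cancelled or nothing is done.
 */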
1031 static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1032                                 struct pp_power_state  *prequest_ps,
1033                         const struct pp_power_state *pcurrent_ps)
1034 {
1035         struct smu8_power_state *smu8_ps =
1036                                 cast_smu8_power_state(&prequest_ps->hardware);
1037
1038         const struct smu8_power_state *smu8_current_ps =
1039                                 cast_const_smu8_power_state(&pcurrent_ps->hardware);
1040
1041         struct smu8_hwmgr *data = hwmgr->backend;
1042         struct PP_Clocks clocks = {0, 0, 0, 0};
1043         bool force_high;
1044
1045         smu8_ps->need_dfs_bypass = true;
1046
1047         data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
1048
1049         clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
1050                                 hwmgr->display_config->min_mem_set_clock :
1051                                 data->sys_info.nbp_memory_clock[1];
1052
1053
1054         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
1055                 clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
1056
1057         force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
1058                         || (hwmgr->display_config->num_display >= 3);
1059
1060         smu8_ps->action = smu8_current_ps->action;
1061
1062         if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1063                 smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
1064         else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
1065                 smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
1066         else if (!force_high && (smu8_ps->action == FORCE_HIGH))
1067                 smu8_ps->action = CANCEL_FORCE_HIGH;
1068         else if (force_high && (smu8_ps->action != FORCE_HIGH))
1069                 smu8_ps->action = FORCE_HIGH;
1070         else
1071                 smu8_ps->action = DO_NOTHING;
1072
1073         return 0;
1074 }
1075
1076 static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1077 {
1078         int result = 0;
1079         struct smu8_hwmgr *data;
1080
1081         data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
1082         if (data == NULL)
1083                 return -ENOMEM;
1084
1085         hwmgr->backend = data;
1086
1087         result = smu8_initialize_dpm_defaults(hwmgr);
1088         if (result != 0) {
1089                 pr_err("smu8_initialize_dpm_defaults failed\n");
1090                 return result;
1091         }
1092
1093         result = smu8_get_system_info_data(hwmgr);
1094         if (result != 0) {
1095                 pr_err("smu8_get_system_info_data failed\n");
1096                 return result;
1097         }
1098
1099         smu8_construct_boot_state(hwmgr);
1100
1101         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =  SMU8_MAX_HARDWARE_POWERLEVELS;
1102
1103         return result;
1104 }
1105
1106 static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1107 {
1108         if (hwmgr != NULL) {
1109                 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
1110                 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
1111
1112                 kfree(hwmgr->backend);
1113                 hwmgr->backend = NULL;
1114         }
1115         return 0;
1116 }
1117
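/*
 * The three helpers below implement the forced DPM levels: "highest"
 * pins both SCLK soft limits to soft_max_clk, "lowest" pins them to
 * soft_min_clk, and "unforce" restores the limits from the
 * vddc_dependency_on_sclk table and the SMU's maximum level.
 */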
1118 static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
1119 {
1120         struct smu8_hwmgr *data = hwmgr->backend;
1121
1122         smum_send_msg_to_smc_with_parameter(hwmgr,
1123                                         PPSMC_MSG_SetSclkSoftMin,
1124                                         smu8_get_sclk_level(hwmgr,
1125                                         data->sclk_dpm.soft_max_clk,
1126                                         PPSMC_MSG_SetSclkSoftMin));
1127
1128         smum_send_msg_to_smc_with_parameter(hwmgr,
1129                                 PPSMC_MSG_SetSclkSoftMax,
1130                                 smu8_get_sclk_level(hwmgr,
1131                                 data->sclk_dpm.soft_max_clk,
1132                                 PPSMC_MSG_SetSclkSoftMax));
1133
1134         return 0;
1135 }
1136
1137 static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1138 {
1139         struct smu8_hwmgr *data = hwmgr->backend;
1140         struct phm_clock_voltage_dependency_table *table =
1141                                 hwmgr->dyn_state.vddc_dependency_on_sclk;
1142         unsigned long clock = 0, level;
1143
1144         if (NULL == table || table->count <= 0)
1145                 return -EINVAL;
1146
1147         data->sclk_dpm.soft_min_clk = table->entries[0].clk;
1148         data->sclk_dpm.hard_min_clk = table->entries[0].clk;
1149         hwmgr->pstate_sclk = table->entries[0].clk;
1150         hwmgr->pstate_mclk = 0;
1151
1152         level = smu8_get_max_sclk_level(hwmgr) - 1;
1153
1154         if (level < table->count)
1155                 clock = table->entries[level].clk;
1156         else
1157                 clock = table->entries[table->count - 1].clk;
1158
1159         data->sclk_dpm.soft_max_clk = clock;
1160         data->sclk_dpm.hard_max_clk = clock;
1161
1162         smum_send_msg_to_smc_with_parameter(hwmgr,
1163                                 PPSMC_MSG_SetSclkSoftMin,
1164                                 smu8_get_sclk_level(hwmgr,
1165                                 data->sclk_dpm.soft_min_clk,
1166                                 PPSMC_MSG_SetSclkSoftMin));
1167
1168         smum_send_msg_to_smc_with_parameter(hwmgr,
1169                                 PPSMC_MSG_SetSclkSoftMax,
1170                                 smu8_get_sclk_level(hwmgr,
1171                                 data->sclk_dpm.soft_max_clk,
1172                                 PPSMC_MSG_SetSclkSoftMax));
1173
1174         return 0;
1175 }
1176
1177 static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1178 {
1179         struct smu8_hwmgr *data = hwmgr->backend;
1180
1181         smum_send_msg_to_smc_with_parameter(hwmgr,
1182                         PPSMC_MSG_SetSclkSoftMax,
1183                         smu8_get_sclk_level(hwmgr,
1184                         data->sclk_dpm.soft_min_clk,
1185                         PPSMC_MSG_SetSclkSoftMax));
1186
1187         smum_send_msg_to_smc_with_parameter(hwmgr,
1188                                 PPSMC_MSG_SetSclkSoftMin,
1189                                 smu8_get_sclk_level(hwmgr,
1190                                 data->sclk_dpm.soft_min_clk,
1191                                 PPSMC_MSG_SetSclkSoftMin));
1192
1193         return 0;
1194 }
1195
1196 static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1197                                 enum amd_dpm_forced_level level)
1198 {
1199         int ret = 0;
1200
1201         switch (level) {
1202         case AMD_DPM_FORCED_LEVEL_HIGH:
1203         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1204                 ret = smu8_phm_force_dpm_highest(hwmgr);
1205                 break;
1206         case AMD_DPM_FORCED_LEVEL_LOW:
1207         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1208         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1209                 ret = smu8_phm_force_dpm_lowest(hwmgr);
1210                 break;
1211         case AMD_DPM_FORCED_LEVEL_AUTO:
1212                 ret = smu8_phm_unforce_dpm_levels(hwmgr);
1213                 break;
1214         case AMD_DPM_FORCED_LEVEL_MANUAL:
1215         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1216         default:
1217                 break;
1218         }
1219
1220         return ret;
1221 }
1222
1223 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1224 {
1225         if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1226                 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
1227         return 0;
1228 }
1229
1230 static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1231 {
1232         if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1233                 return smum_send_msg_to_smc_with_parameter(
1234                         hwmgr,
1235                         PPSMC_MSG_UVDPowerON,
1236                         PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
1237         }
1238
1239         return 0;
1240 }
1241
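/*
 * Reprogram the VCE (ECLK) DPM limits: with Stable P-State or a UMD
 * p-state active the hard minimum is pinned to the highest table entry,
 * otherwise the hard minimum is cleared and the soft minimum is kept at
 * level 1 to avoid the ECLK DPM0 hang described below.
 */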
1242 static int  smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1243 {
1244         struct smu8_hwmgr *data = hwmgr->backend;
1245         struct phm_vce_clock_voltage_dependency_table *ptable =
1246                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1247
1248         /* Stable P-State (or a UMD p-state) is enabled, so pin the VCE DPM to the highest level */
1249         if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1250             hwmgr->en_umd_pstate) {
1251                 data->vce_dpm.hard_min_clk =
1252                                   ptable->entries[ptable->count - 1].ecclk;
1253
1254                 smum_send_msg_to_smc_with_parameter(hwmgr,
1255                         PPSMC_MSG_SetEclkHardMin,
1256                         smu8_get_eclk_level(hwmgr,
1257                                 data->vce_dpm.hard_min_clk,
1258                                 PPSMC_MSG_SetEclkHardMin));
1259         } else {
1260
1261                 smum_send_msg_to_smc_with_parameter(hwmgr,
1262                                         PPSMC_MSG_SetEclkHardMin, 0);
1263                 /* disable ECLK DPM 0. Otherwise VCE could hang if
1264                  * switching SCLK from DPM 0 to 6/7 */
1265                 smum_send_msg_to_smc_with_parameter(hwmgr,
1266                                         PPSMC_MSG_SetEclkSoftMin, 1);
1267         }
1268         return 0;
1269 }
1270
1271 static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1272 {
1273         if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1274                 return smum_send_msg_to_smc(hwmgr,
1275                                                      PPSMC_MSG_VCEPowerOFF);
1276         return 0;
1277 }
1278
1279 static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1280 {
1281         if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1282                 return smum_send_msg_to_smc(hwmgr,
1283                                                      PPSMC_MSG_VCEPowerON);
1284         return 0;
1285 }
1286
1287 static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1288 {
1289         struct smu8_hwmgr *data = hwmgr->backend;
1290
1291         return data->sys_info.bootup_uma_clock;
1292 }
1293
1294 static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1295 {
1296         struct pp_power_state  *ps;
1297         struct smu8_power_state  *smu8_ps;
1298
1299         if (hwmgr == NULL)
1300                 return -EINVAL;
1301
1302         ps = hwmgr->request_ps;
1303
1304         if (ps == NULL)
1305                 return -EINVAL;
1306
1307         smu8_ps = cast_smu8_power_state(&ps->hardware);
1308
1309         if (low)
1310                 return smu8_ps->levels[0].engineClock;
1311         else
1312                 return smu8_ps->levels[smu8_ps->level-1].engineClock;
1313 }
1314
1315 static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
1316                                         struct pp_hw_power_state *hw_ps)
1317 {
1318         struct smu8_hwmgr *data = hwmgr->backend;
1319         struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1320
1321         smu8_ps->level = 1;
1322         smu8_ps->nbps_flags = 0;
1323         smu8_ps->bapm_flags = 0;
1324         smu8_ps->levels[0] = data->boot_power_level;
1325
1326         return 0;
1327 }
1328
1329 static int smu8_dpm_get_pp_table_entry_callback(
1330                                                      struct pp_hwmgr *hwmgr,
1331                                            struct pp_hw_power_state *hw_ps,
1332                                                           unsigned int index,
1333                                                      const void *clock_info)
1334 {
1335         struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1336
1337         const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;
1338
1339         struct phm_clock_voltage_dependency_table *table =
1340                                     hwmgr->dyn_state.vddc_dependency_on_sclk;
1341         uint8_t clock_info_index = smu8_clock_info->index;
1342
1343         if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
1344                 clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
1345
1346         smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
1347         smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
1348
1349         smu8_ps->level = index + 1;
1350
1351         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
1352                 smu8_ps->levels[index].dsDividerIndex = 5;
1353                 smu8_ps->levels[index].ssDividerIndex = 5;
1354         }
1355
1356         return 0;
1357 }
1358
1359 static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
1360 {
1361         int result;
1362         unsigned long ret = 0;
1363
1364         result = pp_tables_get_num_of_entries(hwmgr, &ret);
1365
1366         return result ? 0 : ret;
1367 }
1368
1369 static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
1370                     unsigned long entry, struct pp_power_state *ps)
1371 {
1372         int result;
1373         struct smu8_power_state *smu8_ps;
1374
1375         ps->hardware.magic = smu8_magic;
1376
1377         smu8_ps = cast_smu8_power_state(&(ps->hardware));
1378
1379         result = pp_tables_get_entry(hwmgr, entry, ps,
1380                         smu8_dpm_get_pp_table_entry_callback);
1381
1382         smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
1383         smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
1384
1385         return result;
1386 }
1387
1388 static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
1389 {
1390         return sizeof(struct smu8_power_state);
1391 }
1392
1393 static void smu8_hw_print_display_cfg(
1394         const struct cc6_settings *cc6_settings)
1395 {
1396         PP_DBG_LOG("New Display Configuration:\n");
1397
1398         PP_DBG_LOG("   cpu_cc6_disable: %d\n",
1399                         cc6_settings->cpu_cc6_disable);
1400         PP_DBG_LOG("   cpu_pstate_disable: %d\n",
1401                         cc6_settings->cpu_pstate_disable);
1402         PP_DBG_LOG("   nb_pstate_switch_disable: %d\n",
1403                         cc6_settings->nb_pstate_switch_disable);
1404         PP_DBG_LOG("   cpu_pstate_separation_time: %d\n\n",
1405                         cc6_settings->cpu_pstate_separation_time);
1406 }
1407
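/*
 * Push the cached CC6/C-state settings to the SMU, but only when
 * smu8_store_cc6_data() has flagged a change since the last call.
 */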
1408 static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
1409 {
1410         struct smu8_hwmgr *hw_data = hwmgr->backend;
1411         uint32_t data = 0;
1412
1413         if (hw_data->cc6_settings.cc6_setting_changed) {
1414
1415                 hw_data->cc6_settings.cc6_setting_changed = false;
1416
1417                 smu8_hw_print_display_cfg(&hw_data->cc6_settings);
1418
1419                 data |= (hw_data->cc6_settings.cpu_pstate_separation_time
1420                         & PWRMGT_SEPARATION_TIME_MASK)
1421                         << PWRMGT_SEPARATION_TIME_SHIFT;
1422
1423                 data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
1424                         << PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
1425
1426                 data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
1427                         << PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
1428
1429                 PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
1430                         data);
1431
1432                 smum_send_msg_to_smc_with_parameter(hwmgr,
1433                                                 PPSMC_MSG_SetDisplaySizePowerParams,
1434                                                 data);
1435         }
1436
1437         return 0;
1438 }
1439
1440
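/*
 * Cache the CC6 settings requested by the display stack; the change flag
 * makes smu8_set_cpu_power_state() forward them to the SMU later.
 */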
1441 static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1442                         bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
1443 {
1444         struct smu8_hwmgr *hw_data = hwmgr->backend;
1445
1446         if (separation_time !=
1447             hw_data->cc6_settings.cpu_pstate_separation_time ||
1448             cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
1449             pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
1450             pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
1451
1452                 hw_data->cc6_settings.cc6_setting_changed = true;
1453
1454                 hw_data->cc6_settings.cpu_pstate_separation_time =
1455                         separation_time;
1456                 hw_data->cc6_settings.cpu_cc6_disable =
1457                         cc6_disable;
1458                 hw_data->cc6_settings.cpu_pstate_disable =
1459                         pstate_disable;
1460                 hw_data->cc6_settings.nb_pstate_switch_disable =
1461                         pstate_switch_disable;
1462
1463         }
1464
1465         return 0;
1466 }
1467
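/*
 * Report the highest SCLK whose required voltage still fits within the AC
 * voltage limit, walking the DAL power-level dependency table from the top.
 */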
1468 static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
1469                 struct amd_pp_simple_clock_info *info)
1470 {
1471         uint32_t i;
1472         const struct phm_clock_voltage_dependency_table *table =
1473                         hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
1474         const struct phm_clock_and_voltage_limits *limits =
1475                         &hwmgr->dyn_state.max_clock_voltage_on_ac;
1476
1477         info->engine_max_clock = limits->sclk;
1478         info->memory_max_clock = limits->mclk;
1479
1480         for (i = table->count - 1; i > 0; i--) {
1481                 if (limits->vddc >= table->entries[i].v) {
1482                         info->level = table->entries[i].clk;
1483                         return 0;
1484                 }
1485         }
1486         return -EINVAL;
1487 }
1488
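/*
 * Force a clock level from sysfs: SCLK is pinned by programming the same
 * level mask as both soft minimum and soft maximum; other clock types are
 * silently ignored on this ASIC.
 */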
1489 static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
1490                 enum pp_clock_type type, uint32_t mask)
1491 {
1492         switch (type) {
1493         case PP_SCLK:
1494                 smum_send_msg_to_smc_with_parameter(hwmgr,
1495                                 PPSMC_MSG_SetSclkSoftMin,
1496                                 mask);
1497                 smum_send_msg_to_smc_with_parameter(hwmgr,
1498                                 PPSMC_MSG_SetSclkSoftMax,
1499                                 mask);
1500                 break;
1501         default:
1502                 break;
1503         }
1504
1505         return 0;
1506 }
1507
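/*
 * Format the clock level tables for sysfs.  The level currently selected by
 * the SMU is read back from TARGET_AND_CURRENT_PROFILE_INDEX and marked with
 * an asterisk.  Table entries appear to be stored in 10 kHz units, hence the
 * division by 100 to print MHz; NBP memory clocks are listed in reverse
 * table order.
 */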
1508 static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
1509                 enum pp_clock_type type, char *buf)
1510 {
1511         struct smu8_hwmgr *data = hwmgr->backend;
1512         struct phm_clock_voltage_dependency_table *sclk_table =
1513                         hwmgr->dyn_state.vddc_dependency_on_sclk;
1514         int i, now, size = 0;
1515
1516         switch (type) {
1517         case PP_SCLK:
1518                 now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1519                                 CGS_IND_REG__SMC,
1520                                 ixTARGET_AND_CURRENT_PROFILE_INDEX),
1521                                 TARGET_AND_CURRENT_PROFILE_INDEX,
1522                                 CURR_SCLK_INDEX);
1523
1524                 for (i = 0; i < sclk_table->count; i++)
1525                         size += sprintf(buf + size, "%d: %uMhz %s\n",
1526                                         i, sclk_table->entries[i].clk / 100,
1527                                         (i == now) ? "*" : "");
1528                 break;
1529         case PP_MCLK:
1530                 now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1531                                 CGS_IND_REG__SMC,
1532                                 ixTARGET_AND_CURRENT_PROFILE_INDEX),
1533                                 TARGET_AND_CURRENT_PROFILE_INDEX,
1534                                 CURR_MCLK_INDEX);
1535
1536                 for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
1537                         size += sprintf(buf + size, "%d: %uMhz %s\n",
1538                                         SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
1539                                         (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
1540                 break;
1541         default:
1542                 break;
1543         }
1544         return size;
1545 }
1546
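/*
 * Translate a level of an SMU8 power state into the generic PHM descriptor.
 * For power-containment queries the first level above the DCE slow-SCLK
 * threshold is reported instead.  smu8_convert_8Bit_index_to_voltage()
 * appears to return quarter-millivolt units, so (v + 2) / 4 rounds the
 * result to millivolts.
 */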
1547 static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
1548                                 PHM_PerformanceLevelDesignation designation, uint32_t index,
1549                                 PHM_PerformanceLevel *level)
1550 {
1551         const struct smu8_power_state *ps;
1552         struct smu8_hwmgr *data;
1553         uint32_t level_index;
1554         uint32_t i;
1555
1556         if (level == NULL || hwmgr == NULL || state == NULL)
1557                 return -EINVAL;
1558
1559         data = hwmgr->backend;
1560         ps = cast_const_smu8_power_state(state);
1561
1562         level_index = index > ps->level - 1 ? ps->level - 1 : index;
1563         level->coreClock = ps->levels[level_index].engineClock;
1564
1565         if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
1566                 for (i = 1; i < ps->level; i++) {
1567                         if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
1568                                 level->coreClock = ps->levels[i].engineClock;
1569                                 break;
1570                         }
1571                 }
1572         }
1573
1574         if (level_index == 0)
1575                 level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
1576         else
1577                 level->memory_clock = data->sys_info.nbp_memory_clock[0];
1578
1579         level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
1580         level->nonLocalMemoryFreq = 0;
1581         level->nonLocalMemoryWidth = 0;
1582
1583         return 0;
1584 }
1585
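/*
 * Min/max engine clock with the shallow-sleep divider applied; the stored
 * divider index is used as a power-of-two shift.
 */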
1586 static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1587         const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
1588 {
1589         const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
1590
1591         clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
1592         clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
1593
1594         return 0;
1595 }
1596
1597 static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
1598                                                 struct amd_pp_clocks *clocks)
1599 {
1600         struct smu8_hwmgr *data = hwmgr->backend;
1601         int i;
1602         struct phm_clock_voltage_dependency_table *table;
1603
1604         clocks->count = smu8_get_max_sclk_level(hwmgr);
1605         switch (type) {
1606         case amd_pp_disp_clock:
1607                 for (i = 0; i < clocks->count; i++)
1608                         clocks->clock[i] = data->sys_info.display_clock[i] * 10;
1609                 break;
1610         case amd_pp_sys_clock:
1611                 table = hwmgr->dyn_state.vddc_dependency_on_sclk;
1612                 for (i = 0; i < clocks->count; i++)
1613                         clocks->clock[i] = table->entries[i].clk * 10;
1614                 break;
1615         case amd_pp_mem_clock:
1616                 clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
1617                 for (i = 0; i < clocks->count; i++)
1618                         clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
1619                 break;
1620         default:
1621                 return -EINVAL;
1622         }
1623
1624         return 0;
1625 }
1626
1627 static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1628 {
1629         struct phm_clock_voltage_dependency_table *table =
1630                                         hwmgr->dyn_state.vddc_dependency_on_sclk;
1631         unsigned long level;
1632         const struct phm_clock_and_voltage_limits *limits =
1633                         &hwmgr->dyn_state.max_clock_voltage_on_ac;
1634
1635         if (!table || table->count <= 0 || !clocks)
1636                 return -EINVAL;
1637
1638         level = smu8_get_max_sclk_level(hwmgr) - 1;
1639
1640         if (level < table->count)
1641                 clocks->engine_max_clock = table->entries[level].clk;
1642         else
1643                 clocks->engine_max_clock = table->entries[table->count - 1].clk;
1644
1645         clocks->memory_max_clock = limits->mclk;
1646
1647         return 0;
1648 }
1649
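/*
 * Decode THM_TCON_CUR_TMP.  The raw field is divided by 8 to get degrees
 * Celsius, and a -49 degC offset applies when the range-select bit is set.
 * The result is scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES, the
 * driver's standard temperature unit.
 */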
1650 static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1651 {
1652         int actual_temp = 0;
1653         uint32_t val = cgs_read_ind_register(hwmgr->device,
1654                                              CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
1655         uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
1656
1657         if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
1658                 actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1659         else
1660                 actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1661
1662         return actual_temp;
1663 }
1664
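/*
 * Central sensor dispatch for the amdgpu_pp sensor interface.  Every sensor
 * handled here returns a single 32-bit value.  Clock sensors are resolved
 * through the current SMU profile indices; GPU load is queried from the SMU
 * and falls back to 50% if the query fails; power-gated UVD/VCE clocks read
 * back as 0.
 */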
1665 static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1666                           void *value, int *size)
1667 {
1668         struct smu8_hwmgr *data = hwmgr->backend;
1669
1670         struct phm_clock_voltage_dependency_table *table =
1671                                 hwmgr->dyn_state.vddc_dependency_on_sclk;
1672
1673         struct phm_vce_clock_voltage_dependency_table *vce_table =
1674                 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1675
1676         struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1677                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1678
1679         uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
1680                                         TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
1681         uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1682                                         TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
1683         uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1684                                         TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
1685
1686         uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
1687         uint16_t vddnb, vddgfx;
1688         int result;
1689
1690         /* size must be at least 4 bytes for all sensors */
1691         if (*size < 4)
1692                 return -EINVAL;
1693         *size = 4;
1694
1695         switch (idx) {
1696         case AMDGPU_PP_SENSOR_GFX_SCLK:
1697                 if (sclk_index < NUM_SCLK_LEVELS) {
1698                         sclk = table->entries[sclk_index].clk;
1699                         *((uint32_t *)value) = sclk;
1700                         return 0;
1701                 }
1702                 return -EINVAL;
1703         case AMDGPU_PP_SENSOR_VDDNB:
1704                 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1705                         CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1706                 vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
1707                 *((uint32_t *)value) = vddnb;
1708                 return 0;
1709         case AMDGPU_PP_SENSOR_VDDGFX:
1710                 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1711                         CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1712                 vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
1713                 *((uint32_t *)value) = vddgfx;
1714                 return 0;
1715         case AMDGPU_PP_SENSOR_UVD_VCLK:
1716                 if (!data->uvd_power_gated) {
1717                         if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1718                                 return -EINVAL;
1719                         } else {
1720                                 vclk = uvd_table->entries[uvd_index].vclk;
1721                                 *((uint32_t *)value) = vclk;
1722                                 return 0;
1723                         }
1724                 }
1725                 *((uint32_t *)value) = 0;
1726                 return 0;
1727         case AMDGPU_PP_SENSOR_UVD_DCLK:
1728                 if (!data->uvd_power_gated) {
1729                         if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1730                                 return -EINVAL;
1731                         } else {
1732                                 dclk = uvd_table->entries[uvd_index].dclk;
1733                                 *((uint32_t *)value) = dclk;
1734                                 return 0;
1735                         }
1736                 }
1737                 *((uint32_t *)value) = 0;
1738                 return 0;
1739         case AMDGPU_PP_SENSOR_VCE_ECCLK:
1740                 if (!data->vce_power_gated) {
1741                         if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1742                                 return -EINVAL;
1743                         } else {
1744                                 ecclk = vce_table->entries[vce_index].ecclk;
1745                                 *((uint32_t *)value) = ecclk;
1746                                 return 0;
1747                         }
1748                 }
1749                 *((uint32_t *)value) = 0;
1750                 return 0;
1751         case AMDGPU_PP_SENSOR_GPU_LOAD:
1752                 result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
1753                 if (!result) {
1754                         activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
1755                         activity_percent = activity_percent > 100 ? 100 : activity_percent;
1756                 } else {
1757                         activity_percent = 50;
1758                 }
1759                 *((uint32_t *)value) = activity_percent;
1760                 return 0;
1761         case AMDGPU_PP_SENSOR_UVD_POWER:
1762                 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1763                 return 0;
1764         case AMDGPU_PP_SENSOR_VCE_POWER:
1765                 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1766                 return 0;
1767         case AMDGPU_PP_SENSOR_GPU_TEMP:
1768                 *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
1769                 return 0;
1770         default:
1771                 return -EINVAL;
1772         }
1773 }
1774
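/*
 * Tell the SMU where the CAC buffer lives: the high and low halves of both
 * addresses and the buffer size are sent as separate parameterized messages.
 */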
1775 static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1776                                         uint32_t virtual_addr_low,
1777                                         uint32_t virtual_addr_hi,
1778                                         uint32_t mc_addr_low,
1779                                         uint32_t mc_addr_hi,
1780                                         uint32_t size)
1781 {
1782         smum_send_msg_to_smc_with_parameter(hwmgr,
1783                                         PPSMC_MSG_DramAddrHiVirtual,
1784                                         mc_addr_hi);
1785         smum_send_msg_to_smc_with_parameter(hwmgr,
1786                                         PPSMC_MSG_DramAddrLoVirtual,
1787                                         mc_addr_low);
1788         smum_send_msg_to_smc_with_parameter(hwmgr,
1789                                         PPSMC_MSG_DramAddrHiPhysical,
1790                                         virtual_addr_hi);
1791         smum_send_msg_to_smc_with_parameter(hwmgr,
1792                                         PPSMC_MSG_DramAddrLoPhysical,
1793                                         virtual_addr_low);
1794
1795         smum_send_msg_to_smc_with_parameter(hwmgr,
1796                                         PPSMC_MSG_DramBufferSize,
1797                                         size);
1798         return 0;
1799 }
1800
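/*
 * Start from the default SMU7 thermal policy and raise the upper bound to
 * the auto-throttling threshold plus the hysteresis limit cached in
 * sys_info.
 */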
1801 static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
1802                 struct PP_TemperatureRange *thermal_data)
1803 {
1804         struct smu8_hwmgr *data = hwmgr->backend;
1805
1806         memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
1807
1808         thermal_data->max = (data->thermal_auto_throttling_treshold +
1809                         data->sys_info.htc_hyst_lmt) *
1810                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1811
1812         return 0;
1813 }
1814
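/*
 * Toggle UVD DPM in the SMU feature mask.  Enabling is honoured only when
 * the UVDDPM platform cap is set; disabling always clears the flag.
 */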
1815 static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
1816 {
1817         struct smu8_hwmgr *data = hwmgr->backend;
1818         uint32_t dpm_features = 0;
1819
1820         if (enable &&
1821                 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1822                                   PHM_PlatformCaps_UVDDPM)) {
1823                 data->dpm_flags |= DPMFlags_UVD_Enabled;
1824                 dpm_features |= UVD_DPM_MASK;
1825                 smum_send_msg_to_smc_with_parameter(hwmgr,
1826                             PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
1827         } else {
1828                 dpm_features |= UVD_DPM_MASK;
1829                 data->dpm_flags &= ~DPMFlags_UVD_Enabled;
1830                 smum_send_msg_to_smc_with_parameter(hwmgr,
1831                            PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
1832         }
1833         return 0;
1834 }
1835
1836 int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
1837 {
1838         struct smu8_hwmgr *data = hwmgr->backend;
1839         struct phm_uvd_clock_voltage_dependency_table *ptable =
1840                 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1841
1842         if (!bgate) {
1843                 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1844                 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1845                     hwmgr->en_umd_pstate) {
1846                         data->uvd_dpm.hard_min_clk =
1847                                    ptable->entries[ptable->count - 1].vclk;
1848
1849                         smum_send_msg_to_smc_with_parameter(hwmgr,
1850                                 PPSMC_MSG_SetUvdHardMin,
1851                                 smu8_get_uvd_level(hwmgr,
1852                                         data->uvd_dpm.hard_min_clk,
1853                                         PPSMC_MSG_SetUvdHardMin));
1854
1855                         smu8_enable_disable_uvd_dpm(hwmgr, true);
1856                 } else {
1857                         smu8_enable_disable_uvd_dpm(hwmgr, true);
1858                 }
1859         } else {
1860                 smu8_enable_disable_uvd_dpm(hwmgr, false);
1861         }
1862
1863         return 0;
1864 }
1865
1866 static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1867 {
1868         struct smu8_hwmgr *data = hwmgr->backend;
1869         uint32_t dpm_features = 0;
1870
1871         if (enable && phm_cap_enabled(
1872                                 hwmgr->platform_descriptor.platformCaps,
1873                                 PHM_PlatformCaps_VCEDPM)) {
1874                 data->dpm_flags |= DPMFlags_VCE_Enabled;
1875                 dpm_features |= VCE_DPM_MASK;
1876                 smum_send_msg_to_smc_with_parameter(hwmgr,
1877                             PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
1878         } else {
1879                 dpm_features |= VCE_DPM_MASK;
1880                 data->dpm_flags &= ~DPMFlags_VCE_Enabled;
1881                 smum_send_msg_to_smc_with_parameter(hwmgr,
1882                            PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
1883         }
1884
1885         return 0;
1886 }
1887
1888
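/*
 * UVD power gating.  When gating, the IP block is power- and clock-gated
 * before the SMU powers the engine down; ungating reverses the sequence and
 * re-applies the UVD DPM settings.
 */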
1889 static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
1890 {
1891         struct smu8_hwmgr *data = hwmgr->backend;
1892
1893         data->uvd_power_gated = bgate;
1894
1895         if (bgate) {
1896                 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1897                                                 AMD_IP_BLOCK_TYPE_UVD,
1898                                                 AMD_PG_STATE_GATE);
1899                 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1900                                                 AMD_IP_BLOCK_TYPE_UVD,
1901                                                 AMD_CG_STATE_GATE);
1902                 smu8_dpm_update_uvd_dpm(hwmgr, true);
1903                 smu8_dpm_powerdown_uvd(hwmgr);
1904         } else {
1905                 smu8_dpm_powerup_uvd(hwmgr);
1906                 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1907                                                 AMD_IP_BLOCK_TYPE_UVD,
1908                                                 AMD_CG_STATE_UNGATE);
1909                 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1910                                                 AMD_IP_BLOCK_TYPE_UVD,
1911                                                 AMD_PG_STATE_UNGATE);
1912                 smu8_dpm_update_uvd_dpm(hwmgr, false);
1913         }
1914
1915 }
1916
1917 static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
1918 {
1919         struct smu8_hwmgr *data = hwmgr->backend;
1920
1921         if (bgate) {
1922                 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1923                                         AMD_IP_BLOCK_TYPE_VCE,
1924                                         AMD_PG_STATE_GATE);
1925                 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1926                                         AMD_IP_BLOCK_TYPE_VCE,
1927                                         AMD_CG_STATE_GATE);
1928                 smu8_enable_disable_vce_dpm(hwmgr, false);
1929                 smu8_dpm_powerdown_vce(hwmgr);
1930                 data->vce_power_gated = true;
1931         } else {
1932                 smu8_dpm_powerup_vce(hwmgr);
1933                 data->vce_power_gated = false;
1934                 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
1935                                         AMD_IP_BLOCK_TYPE_VCE,
1936                                         AMD_CG_STATE_UNGATE);
1937                 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1938                                         AMD_IP_BLOCK_TYPE_VCE,
1939                                         AMD_PG_STATE_UNGATE);
1940                 smu8_dpm_update_vce_dpm(hwmgr);
1941                 smu8_enable_disable_vce_dpm(hwmgr, true);
1942         }
1943 }
1944
1945 static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
1946         .backend_init = smu8_hwmgr_backend_init,
1947         .backend_fini = smu8_hwmgr_backend_fini,
1948         .apply_state_adjust_rules = smu8_apply_state_adjust_rules,
1949         .force_dpm_level = smu8_dpm_force_dpm_level,
1950         .get_power_state_size = smu8_get_power_state_size,
1951         .powerdown_uvd = smu8_dpm_powerdown_uvd,
1952         .powergate_uvd = smu8_dpm_powergate_uvd,
1953         .powergate_vce = smu8_dpm_powergate_vce,
1954         .get_mclk = smu8_dpm_get_mclk,
1955         .get_sclk = smu8_dpm_get_sclk,
1956         .patch_boot_state = smu8_dpm_patch_boot_state,
1957         .get_pp_table_entry = smu8_dpm_get_pp_table_entry,
1958         .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
1959         .set_cpu_power_state = smu8_set_cpu_power_state,
1960         .store_cc6_data = smu8_store_cc6_data,
1961         .force_clock_level = smu8_force_clock_level,
1962         .print_clock_levels = smu8_print_clock_levels,
1963         .get_dal_power_level = smu8_get_dal_power_level,
1964         .get_performance_level = smu8_get_performance_level,
1965         .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
1966         .get_clock_by_type = smu8_get_clock_by_type,
1967         .get_max_high_clocks = smu8_get_max_high_clocks,
1968         .read_sensor = smu8_read_sensor,
1969         .power_off_asic = smu8_power_off_asic,
1970         .asic_setup = smu8_setup_asic_task,
1971         .dynamic_state_management_enable = smu8_enable_dpm_tasks,
1972         .power_state_set = smu8_set_power_state_tasks,
1973         .dynamic_state_management_disable = smu8_disable_dpm_tasks,
1974         .notify_cac_buffer_info = smu8_notify_cac_buffer_info,
1975         .get_thermal_temperature_range = smu8_get_thermal_temperature_range,
1976 };
1977
1978 int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
1979 {
1980         hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
1981         hwmgr->pptable_func = &pptable_funcs;
1982         return 0;
1983 }