/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
#include "power_state.h"
#include "hwmgr.h"
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"
#include "vega10_hwmgr.h"

extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vegam_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;
extern const struct pp_smumgr_func vega20_smu_funcs;

extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);

static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);

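/*
 * Seed the default workload bookkeeping: each power profile gets a
 * priority index, and workload_setting[] lists the profiles in that
 * same default order.
 */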
static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;

	hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
}

int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev;

	if (!hwmgr)
		return -EINVAL;

	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
	hwmgr->pp_table_version = PP_TABLE_V1;
	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr_init_default_caps(hwmgr);
	hwmgr_set_user_specify_caps(hwmgr);
	hwmgr->fan_ctrl_is_in_default_mode = true;
	hwmgr_init_workload_prority(hwmgr);
	hwmgr->gfxoff_state_changed_by_workload = false;

	adev = hwmgr->adev;

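	/*
	 * Per-family setup: pick the SMU manager backend and the hwmgr
	 * function pointers for this ASIC, and trim the default feature
	 * mask (GFXOFF, VBI time, overdrive, ...) accordingly.
	 */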
	switch (hwmgr->chip_family) {
	case AMDGPU_FAMILY_CI:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->smumgr_funcs = &ci_smu_funcs;
		ci_set_asic_special_caps(hwmgr);
		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
					 PP_ENABLE_GFX_CG_THRU_SMU |
					 PP_GFXOFF_MASK);
		hwmgr->pp_table_version = PP_TABLE_V0;
		hwmgr->od_enabled = false;
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_CZ:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->od_enabled = false;
		hwmgr->smumgr_funcs = &smu8_smu_funcs;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		smu8_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_VI:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		switch (hwmgr->chip_id) {
		case CHIP_TOPAZ:
			hwmgr->smumgr_funcs = &iceland_smu_funcs;
			topaz_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						PP_ENABLE_GFX_CG_THRU_SMU);
			hwmgr->pp_table_version = PP_TABLE_V0;
			hwmgr->od_enabled = false;
			break;
		case CHIP_TONGA:
			hwmgr->smumgr_funcs = &tonga_smu_funcs;
			tonga_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
			break;
		case CHIP_FIJI:
			hwmgr->smumgr_funcs = &fiji_smu_funcs;
			fiji_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						PP_ENABLE_GFX_CG_THRU_SMU);
			break;
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
		case CHIP_POLARIS12:
			hwmgr->smumgr_funcs = &polaris10_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		case CHIP_VEGAM:
			hwmgr->smumgr_funcs = &vegam_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		default:
			return -EINVAL;
		}
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_AI:
		switch (hwmgr->chip_id) {
		case CHIP_VEGA10:
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega10_smu_funcs;
			vega10_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA12:
			hwmgr->smumgr_funcs = &vega12_smu_funcs;
			vega12_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA20:
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega20_smu_funcs;
			vega20_hwmgr_init(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_FAMILY_RV:
		switch (hwmgr->chip_id) {
		case CHIP_RAVEN:
			hwmgr->od_enabled = false;
			hwmgr->smumgr_funcs = &smu10_smu_funcs;
			smu10_init_function_pointers(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

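/*
 * Software-side init: register the powerplay interrupt handlers and let
 * the selected SMU backend run its smu_init step.
 */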
int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
		return -EINVAL;

	phm_register_irq_handlers(hwmgr);
	pr_info("hwmgr_sw_init smu backend is %s\n", hwmgr->smumgr_funcs->name);

	return hwmgr->smumgr_funcs->smu_init(hwmgr);
}

int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
{
	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
		hwmgr->smumgr_funcs->smu_fini(hwmgr);

	return 0;
}

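/*
 * Hardware init: parse the powerplay table, initialize the hwmgr backend,
 * set up the ASIC, then enable dynamic state management and the thermal
 * controller.  Any failure unwinds the steps that already completed.
 */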
int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev);
	hwmgr->pm_en = amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf);
	if (!hwmgr->pm_en)
		return 0;

	if (!hwmgr->pptable_func ||
	    !hwmgr->pptable_func->pptable_init ||
	    !hwmgr->hwmgr_func->backend_init) {
		hwmgr->pm_en = false;
		pr_info("dpm not supported\n");
		return 0;
	}

	ret = hwmgr->pptable_func->pptable_init(hwmgr);
	if (ret)
		goto err;

	((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
				hwmgr->thermal_controller.fanInfo.bNoFan;

	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
	if (ret)
		goto err1;
	/* make sure dc limits are valid */
	if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
		hwmgr->dyn_state.max_clock_voltage_on_dc =
				hwmgr->dyn_state.max_clock_voltage_on_ac;

	ret = psm_init_power_state_table(hwmgr);
	if (ret)
		goto err2;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		goto err2;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		goto err2;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		goto err2;

	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;

	return 0;
err2:
	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
err1:
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
err:
	return ret;
}

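/* Tear down in roughly the reverse order of hwmgr_hw_init(). */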
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
		return 0;

	phm_stop_thermal_controller(hwmgr);
	psm_set_boot_states(hwmgr);
	psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	phm_disable_dynamic_state_management(hwmgr);
	phm_disable_clock_power_gatings(hwmgr);

	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
	return psm_fini_power_state_table(hwmgr);
}

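/*
 * Suspend: disable firmware CTF handling, drop back to the boot power
 * state and power the ASIC down.  Skipped for VFs and when DPM is off.
 */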
int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
		return 0;

	phm_disable_smc_firmware_ctf(hwmgr);
	ret = psm_set_boot_states(hwmgr);
	if (ret)
		return ret;
	ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	if (ret)
		return ret;
	ret = phm_power_down_asic(hwmgr);

	return ret;
}

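/* Resume: redo ASIC setup and re-enable DPM, mirroring hwmgr_hw_init(). */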
int hwmgr_resume(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->not_vf || !hwmgr->pm_en)
		return 0;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		return ret;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		return ret;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		return ret;

	ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);

	return ret;
}

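/* Map the generic amd_pm_state_type onto the powerplay UI state label. */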
static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
{
	switch (state) {
	case POWER_STATE_TYPE_BATTERY:
		return PP_StateUILabel_Battery;
	case POWER_STATE_TYPE_BALANCED:
		return PP_StateUILabel_Balanced;
	case POWER_STATE_TYPE_PERFORMANCE:
		return PP_StateUILabel_Performance;
	default:
		return PP_StateUILabel_None;
	}
}

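/*
 * Dispatcher for power-management tasks from the amdgpu layer: display
 * reconfiguration, user-requested power states, and requests to
 * re-evaluate the current power state.
 */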
int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;

	if (hwmgr == NULL)
		return -EINVAL;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		if (!hwmgr->not_vf)
			return ret;
		ret = phm_pre_display_configuration_changed(hwmgr);
		if (ret)
			return ret;
		ret = phm_set_cpu_power_state(hwmgr);
		if (ret)
			return ret;
		ret = psm_set_performance_states(hwmgr);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
		break;
	case AMD_PP_TASK_ENABLE_USER_STATE:
	{
		enum PP_StateUILabel requested_ui_label;
		struct pp_power_state *requested_ps = NULL;

		if (!hwmgr->not_vf)
			return ret;
		if (user_state == NULL) {
			ret = -EINVAL;
			break;
		}

		requested_ui_label = power_state_convert(*user_state);
		ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, true, requested_ps);
		break;
	}
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
		break;
	default:
		break;
	}
	return ret;
}

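/*
 * Default platform caps shared by all ASICs, applied before the per-ASIC
 * and user-specified overrides.
 */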
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);

#if defined(CONFIG_ACPI)
	if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
#endif

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPatchPowerState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);
}

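/*
 * Apply the user-specified feature mask on top of the defaults: sclk deep
 * sleep, power containment/CAC and overdrive.
 */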
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);
	else
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_CAC);
	} else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_CAC);
	}

	if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
		hwmgr->od_enabled = true;

	return 0;
}

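/* Per-ASIC capability tweaks shared by Polaris10/11/12 and VegaM. */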
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition);

	if (((hwmgr->chip_id == CHIP_POLARIS11) && !hwmgr->is_kicker) ||
	    (hwmgr->chip_id == CHIP_POLARIS12))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SPLLShutdownSupport);

	if (hwmgr->chip_id != CHIP_POLARIS11) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DBRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_TDRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_TCPRamping);
	}
	return 0;
}

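/* Fiji: enable EVV, keep the SQ/DB/TD/TCP (didt) ramping caps cleared. */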
int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}

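/* Tonga: like Fiji, but UVD/VCE power gating is also kept disabled. */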
int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_VCEPowerGating);
	return 0;
}

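/* Topaz: same EVV/ramping-cap trim as Fiji. */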
int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}

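/* CI: no didt ramping; engine and memory spread spectrum supported. */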
int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport);
	return 0;
}