/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/*
 * Although these are defined in each ASIC's specific header file,
 * they share the same definitions and values. That makes common
 * APIs for issuing SMC messages to all ASICs possible.
 */
#define mmMP1_SMN_C2PMSG_66            0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX   0

#define mmMP1_SMN_C2PMSG_82            0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX   0

#define mmMP1_SMN_C2PMSG_90            0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX   0

/* SMU 13.0.5 has its own mailbox messaging registers */

#define mmMP1_C2PMSG_2                 (0xbee142 + 0xb00000 / 4)
#define mmMP1_C2PMSG_2_BASE_IDX        0

#define mmMP1_C2PMSG_34                (0xbee262 + 0xb00000 / 4)
#define mmMP1_C2PMSG_34_BASE_IDX       0

#define mmMP1_C2PMSG_33                (0xbee261 + 0xb00000 / 4)
#define mmMP1_C2PMSG_33_BASE_IDX       0

#define MP1_C2PMSG_90__CONTENT_MASK    0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};
#define smu_cmn_call_asic_func(intf, smu, args...)                     \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?                  \
			     (smu)->ppt_funcs->intf(smu, ##args) :     \
			     -ENOTSUPP) :                              \
			    -EINVAL)
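
/*
 * Usage sketch for the dispatch macro above, mirroring
 * __smu_get_enabled_features() later in this file: the call resolves
 * to the ASIC-specific hook when one is registered, -ENOTSUPP when
 * the hook is absent, and -EINVAL when there are no ppt_funcs at all.
 * The local variables here are illustrative only:
 *
 *	uint64_t mask;
 *	int ret;
 *
 *	ret = smu_cmn_call_asic_func(get_enabled_mask, smu, &mask);
 */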

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
		*arg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
	else
		*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * once the SMU firmware exports a unified header file containing
 * these macros, which we can then simply include and use. At the
 * moment, these error codes are unfortunately defined by the SMU
 * per ASIC, yet we're a single driver for all ASICs.
 */
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to the SMU context
 *
 * Returns the status of the SMU, which could be:
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * These values mirror the SMU_RESP_* macros above. Ideally they would
 * come from a single header file maintained by the SMU FW team, so
 * that we're impervious to firmware changes. At the moment those
 * values are defined in various header files, one for each ASIC, yet
 * here we're a single ASIC-agnostic interface. Such a change can be
 * followed up by a subsequent patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
			reg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33);
		else
			reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}

static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);
	u32 msg_idx, prm;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE: {
		if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
			msg_idx = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2);
			prm     = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
		} else {
			msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
			prm     = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
		}
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
				    msg_idx, prm);
	}
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
		WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33, 0);
		WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34, param);
		WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2, msg);
	} else {
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	}
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int res;

	if (adev->no_hw_access)
		return 0;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    res == -EREMOTEIO)
		goto Out;
	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return res;
}
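
/*
 * A minimal caller sketch (the msg_index/param values are assumed, not
 * taken from a real caller): pair smu_cmn_send_msg_without_waiting()
 * with smu_cmn_wait_for_response() below when the status must be
 * collected separately from the send:
 *
 *	ret = smu_cmn_send_msg_without_waiting(smu, msg_index, param);
 *	if (!ret)
 *		ret = smu_cmn_wait_for_response(smu);
 */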

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;
	int res;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);

	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with a parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * the @read_arg pointer.
 *
 * Return 0 on success, -errno when a problem is encountered while
 * sending the message or receiving the reply. If there is a PCI bus
 * recovery, or the destination is a virtual GPU which does not allow
 * this message type, the message is simply dropped and success is
 * also returned. See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The return value @read_arg is read back regardless, to give back
 * more information to the client, which on error would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int res, index;
	u32 reg;

	if (adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    res == -EREMOTEIO) {
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
		goto Out;
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res != 0)
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	if (read_arg)
		smu_cmn_read_arg(smu, read_arg);
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	mutex_unlock(&smu->message_lock);
	return res;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}
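
/*
 * Typical caller sketch, matching smu_cmn_get_smc_version() below
 * (SMU_MSG_GetSmuVersion is a real message type; the surrounding
 * variables are illustrative):
 *
 *	uint32_t smu_version;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, &smu_version);
 *	if (ret)
 *		return ret;
 */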

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_WINDOW3D ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}
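
/*
 * Lookup sketch, as used by smu_cmn_send_smc_msg_with_param() above:
 * a negative return is an error; -EACCES specifically means the
 * message is valid but not permitted in an SR-IOV VF environment.
 *
 *	index = smu_cmn_to_asic_specific_index(smu,
 *					       CMN2ASIC_MAPPING_MSG,
 *					       msg);
 *	if (index < 0)
 *		return index == -EACCES ? 0 : index;
 */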

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	return test_bit(feature_id, feature->supported);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      uint64_t *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * Renoir and Cyan Skillfish are assumed to have all features
	 * enabled. Since they also have no feature_map available, the
	 * check here avoids the unwanted feature_map check below.
	 */
	if (enabled_features == ULLONG_MAX)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return test_bit(feature_id, (unsigned long *)&enabled_features);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
		break;
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask)
{
	uint32_t *feature_mask_high;
	uint32_t *feature_mask_low;
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	feature_mask_low = &((uint32_t *)feature_mask)[0];
	feature_mask_high = &((uint32_t *)feature_mask)[1];

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      0,
						      feature_mask_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      1,
						      feature_mask_high);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesHigh,
					   feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesLow,
					   feature_mask_low);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}
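
/*
 * Translation sketch with an assumed (hypothetical) two-entry map:
 * with throttler_map[0] == 10, a set bit 0 in the ASIC-dependent
 * status becomes a set bit 10 in the ASIC-independent status.
 *
 *	static const uint8_t map[2] = { 10, 11 };	// hypothetical map
 *
 *	// returns 1ULL << 10
 *	smu_cmn_get_indep_throttler_status(0x1, map);
 */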

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	uint64_t feature_mask;
	int feature_index = 0;
	uint32_t count = 0;
	int8_t sort_feature[SMU_FEATURE_COUNT];
	size_t size = 0;
	int ret = 0, i;
	int feature_id;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return 0;

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     upper_32_bits(feature_mask), lower_32_bits(feature_mask));

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		if (sort_feature[i] < 0)
			continue;

		/* convert to the ASIC-specific feature ID */
		feature_id = smu_cmn_to_asic_specific_index(smu,
							    CMN2ASIC_MAPPING_FEATURE,
							    sort_feature[i]);
		if (feature_id < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[i]),
				      i,
				      test_bit(feature_id, (unsigned long *)&feature_mask) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint64_t feature_mask;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_2_enabled = ~feature_mask & new_mask;
	feature_2_disabled = feature_mask & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:  smu_context pointer
 * @mask: the dpm feature which should not be disabled
 *        SMU_FEATURE_COUNT: no exception, disable all dpm features
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   false);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache: to guarantee that the content
		 * seen by the GPU is consistent with that seen by the
		 * CPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}
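
/*
 * Caller sketch: push a driver-side table to the SMU with
 * drv2smu == true, or pull one back into table_data with
 * drv2smu == false. This mirrors smu_cmn_write_watermarks_table()
 * below (SMU_TABLE_WATERMARKS is a real table ID):
 *
 *	ret = smu_cmn_update_table(smu, SMU_TABLE_WATERMARKS, 0,
 *				   watermarks_table, true);
 */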

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.combo_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_COMBO_PPTABLE,
				    0,
				    pptable,
				    false);
}

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	(((a) << 16) | (b))

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(1, 2):
		structure_size = sizeof(struct gpu_metrics_v1_2);
		break;
	case METRICS_VERSION(1, 3):
		structure_size = sizeof(struct gpu_metrics_v1_3);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}
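
/*
 * Caller sketch (the gpu_metrics_table pointer is assumed to come from
 * the per-ASIC table context; v2.2 is one of the revisions handled
 * above): initialize the header before filling the per-ASIC fields.
 *
 *	struct gpu_metrics_v2_2 *gpu_metrics = smu_table->gpu_metrics_table;
 *
 *	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
 */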

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p);

	pci_dev_put(p);

	return snd_driver_loaded;
}