/*
 * Copyright 2012-16 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/slab.h>

#include "dal_asic_id.h"
#include "dc_types.h"
#include "dccg.h"
#include "clk_mgr_internal.h"

#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
#include "dce120/dce120_clk_mgr.h"
#include "dce60/dce60_clk_mgr.h"
#include "dcn10/rv1_clk_mgr.h"
#include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dcn21/rn_clk_mgr.h"
#include "dcn201/dcn201_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dcn301/vg_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
#include "dcn314/dcn314_clk_mgr.h"
#include "dcn315/dcn315_clk_mgr.h"
#include "dcn316/dcn316_clk_mgr.h"
#include "dcn32/dcn32_clk_mgr.h"

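/**
 * clk_mgr_helper_get_active_display_cnt() - Count displays to report as active
 * @dc: display core instance
 * @context: state whose streams are examined
 *
 * SubVP phantom streams are excluded from the count. Virtual streams are
 * counted even with DPMS off, so the headless case (where HPD does not fire
 * in S0i2) is still reported as active.
 *
 * Return: number of active displays in @context.
 */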
int clk_mgr_helper_get_active_display_cnt(
		struct dc *dc,
		struct dc_state *context)
{
	int i, display_count;

	display_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		/* Don't count SubVP phantom pipes as part of active
		 * display count
		 */
		if (stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		/*
		 * Only notify active stream or virtual stream.
		 * Need to notify virtual stream to work around
		 * headless case. HPD does not fire when system is in
		 * S0i2.
		 */
		if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
			display_count++;
	}

	return display_count;
}

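/**
 * clk_mgr_helper_get_active_plane_cnt() - Sum plane_count over all streams
 * @dc: display core instance
 * @context: state whose stream_status entries are summed
 *
 * Both active and virtual streams contribute to the total.
 *
 * Return: total plane count across all streams in @context.
 */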
int clk_mgr_helper_get_active_plane_cnt(
		struct dc *dc,
		struct dc_state *context)
{
	int i, total_plane_count;

	total_plane_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_status stream_status = context->stream_status[i];

		/*
		 * Sum up plane_count for all streams (active and virtual).
		 */
		total_plane_count += stream_status.plane_count;
	}

	return total_plane_count;
}

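/**
 * clk_mgr_exit_optimized_pwr_state() - Leave the optimized power state
 * @dc: display core instance
 * @clk_mgr: clock manager that caches the PSR allow-active state
 *
 * Calls the hardware sequencer's exit_optimized_pwr_state hook, then, for
 * every PSR-capable eDP link, caches psr_allow_active in @clk_mgr and
 * disallows PSR so it can be restored later by clk_mgr_optimize_pwr_state().
 */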
void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	unsigned int panel_inst;

	get_edp_links(dc, edp_links, &edp_num);
	if (dc->hwss.exit_optimized_pwr_state)
		dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);

	if (edp_num) {
		for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
			bool allow_active = false;

			edp_link = edp_links[panel_inst];
			if (!edp_link->psr_settings.psr_feature_enabled)
				continue;
			clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
			dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
		}
	}
}

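/**
 * clk_mgr_optimize_pwr_state() - Re-enter the optimized power state
 * @dc: display core instance
 * @clk_mgr: clock manager holding the cached PSR allow-active state
 *
 * Restores the psr_allow_active value cached by
 * clk_mgr_exit_optimized_pwr_state() on every PSR-capable eDP link, then
 * calls the hardware sequencer's optimize_pwr_state hook.
 */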
void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	unsigned int panel_inst;

	get_edp_links(dc, edp_links, &edp_num);
	if (edp_num) {
		for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
			edp_link = edp_links[panel_inst];
			if (!edp_link->psr_settings.psr_feature_enabled)
				continue;
			dc_link_set_psr_allow_active(edp_link,
					&clk_mgr->psr_allow_active_cache, false, false, NULL);
		}
	}

	if (dc->hwss.optimize_pwr_state)
		dc->hwss.optimize_pwr_state(dc, dc->current_state);
}

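/**
 * dc_clk_mgr_create() - Factory for the per-ASIC clock manager
 * @ctx: DC context carrying the ASIC id used for dispatch
 * @pp_smu: powerplay/SMU interface passed to the DCN constructors
 * @dccg: display clock generator passed to the DCN constructors
 *
 * Allocates a clk_mgr of the type required by the chip family (and, within
 * a family, by the hardware internal revision), runs the matching construct
 * function and returns the base pointer. Free with dc_destroy_clk_mgr().
 *
 * Return: base clk_mgr pointer, or NULL on allocation failure or unknown ASIC.
 */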
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{
	struct hw_asic_id asic_id = ctx->asic_id;

	switch (asic_id.chip_family) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case FAMILY_SI: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		dce60_clk_mgr_construct(ctx, clk_mgr);
		return &clk_mgr->base;
	}
#endif
	case FAMILY_CI:
	case FAMILY_KV: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		dce_clk_mgr_construct(ctx, clk_mgr);
		return &clk_mgr->base;
	}
	case FAMILY_CZ: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		dce110_clk_mgr_construct(ctx, clk_mgr);
		return &clk_mgr->base;
	}
	case FAMILY_VI: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
				ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
			dce_clk_mgr_construct(ctx, clk_mgr);
			return &clk_mgr->base;
		}
		if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
				ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
				ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
			dce112_clk_mgr_construct(ctx, clk_mgr);
			return &clk_mgr->base;
		}
		if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) {
			dce112_clk_mgr_construct(ctx, clk_mgr);
			return &clk_mgr->base;
		}
		return &clk_mgr->base;
	}
	case FAMILY_AI: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
			dce121_clk_mgr_construct(ctx, clk_mgr);
		else
			dce120_clk_mgr_construct(ctx, clk_mgr);
		return &clk_mgr->base;
	}
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case FAMILY_RV: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
			rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base;
		}

		if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
			rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base;
		}
		if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
			rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
			return &clk_mgr->base;
		}
		if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) ||
				ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) {
			rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
			return &clk_mgr->base;
		}
		return &clk_mgr->base;
	}
	case FAMILY_NV: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
			dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base;
		}
		if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev)) {
			dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base;
		}
		if (ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev)) {
			dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base;
		}
		if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
			dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base;
		}
		dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base;
	}
	case FAMILY_VGH:
		if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev)) {
			struct clk_mgr_vgh *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

			if (clk_mgr == NULL) {
				BREAK_TO_DEBUGGER();
				return NULL;
			}
			vg_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base.base;
		}
		break;

	case FAMILY_YELLOW_CARP: {
		struct clk_mgr_dcn31 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base.base;
	}
	case AMDGPU_FAMILY_GC_10_3_6: {
		struct clk_mgr_dcn315 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn315_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base.base;
	}
	case AMDGPU_FAMILY_GC_10_3_7: {
		struct clk_mgr_dcn316 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn316_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base.base;
	}
	case AMDGPU_FAMILY_GC_11_0_0: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base;
	}

	case AMDGPU_FAMILY_GC_11_0_1: {
		struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn314_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base.base;
	}

#endif
	default:
		ASSERT(0); /* Unknown Asic */
		break;
	}

	return NULL;
}

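/**
 * dc_destroy_clk_mgr() - Destroy a clock manager made by dc_clk_mgr_create()
 * @clk_mgr_base: base pointer returned by dc_clk_mgr_create()
 *
 * Runs the family-specific destroy hook, if any, then frees the allocation.
 */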
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

#ifdef CONFIG_DRM_AMD_DC_DCN
	switch (clk_mgr_base->ctx->asic_id.chip_family) {
	case FAMILY_NV:
		if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
			dcn3_clk_mgr_destroy(clk_mgr);
		} else if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
			dcn3_clk_mgr_destroy(clk_mgr);
		} else if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
			dcn3_clk_mgr_destroy(clk_mgr);
		}
		break;

	case FAMILY_VGH:
		if (ASICREV_IS_VANGOGH(clk_mgr_base->ctx->asic_id.hw_internal_rev))
			vg_clk_mgr_destroy(clk_mgr);
		break;

	case FAMILY_YELLOW_CARP:
		dcn31_clk_mgr_destroy(clk_mgr);
		break;

	case AMDGPU_FAMILY_GC_10_3_6:
		dcn315_clk_mgr_destroy(clk_mgr);
		break;

	case AMDGPU_FAMILY_GC_10_3_7:
		dcn316_clk_mgr_destroy(clk_mgr);
		break;

	case AMDGPU_FAMILY_GC_11_0_0:
		dcn32_clk_mgr_destroy(clk_mgr);
		break;

	case AMDGPU_FAMILY_GC_11_0_1:
		dcn314_clk_mgr_destroy(clk_mgr);
		break;

	default:
		break;
	}
#endif

	kfree(clk_mgr);
}