// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>
#include <linux/kernel.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_pm.h"
#include "vlv_suspend.h"

#include "gt/intel_gt_regs.h"

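/*
 * Gunit register snapshot taken around a D3/S0ix cycle; the groups below
 * mirror the hardware units the registers belong to (GAM, MBC, GCP, GPM
 * and the CZ power domains), see the save/restore note further down.
 */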
struct vlv_s0ix_state {
        /* GAM */
        u32 wr_watermark;
        u32 gfx_prio_ctrl;
        u32 arb_mode;
        u32 gfx_pend_tlb0;
        u32 gfx_pend_tlb1;
        u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
        u32 media_max_req_count;
        u32 gfx_max_req_count;
        u32 render_hwsp;
        u32 ecochk;
        u32 bsd_hwsp;
        u32 blt_hwsp;
        u32 tlb_rd_addr;

        /* MBC */
        u32 g3dctl;
        u32 gsckgctl;
        u32 mbctl;

        /* GCP */
        u32 ucgctl1;
        u32 ucgctl3;
        u32 rcgctl1;
        u32 rcgctl2;
        u32 rstctl;
        u32 misccpctl;

        /* GPM */
        u32 gfxpause;
        u32 rpdeuhwtc;
        u32 rpdeuc;
        u32 ecobus;
        u32 pwrdwnupctl;
        u32 rp_down_timeout;
        u32 rp_deucsw;
        u32 rcubmabdtmr;
        u32 rcedata;
        u32 spare2gh;

        /* Display 1 CZ domain */
        u32 gt_imr;
        u32 gt_ier;
        u32 pm_imr;
        u32 pm_ier;
        u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

        /* GT SA CZ domain */
        u32 tilectl;
        u32 gt_fifoctl;
        u32 gtlc_wake_ctrl;
        u32 gtlc_survive;
        u32 pmwgicz;

        /* Display 2 CZ domain */
        u32 gu_ctl0;
        u32 gu_ctl1;
        u32 pcbr;
        u32 clock_gate_dis2;
};

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including registers marked with 'Debug'. These
 *   have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully set up by an initialization function called from
 *   the resume path, for example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for the registers that can be safely ignored based on the
 * above 3 criteria, we save/restore all others, practically treating the HW
 * context as a black box for the driver. Further investigation is needed to
 * reduce the set of saved/restored registers even further, by following the
 * same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *i915)
{
        struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
        struct intel_uncore *uncore = &i915->uncore;
        int i;

        if (!s)
                return;

        /* GAM 0x4000-0x4770 */
        s->wr_watermark = intel_uncore_read(uncore, GEN7_WR_WATERMARK);
        s->gfx_prio_ctrl = intel_uncore_read(uncore, GEN7_GFX_PRIO_CTRL);
        s->arb_mode = intel_uncore_read(uncore, ARB_MODE);
        s->gfx_pend_tlb0 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB0);
        s->gfx_pend_tlb1 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                s->lra_limits[i] = intel_uncore_read(uncore, GEN7_LRA_LIMITS(i));

        s->media_max_req_count = intel_uncore_read(uncore, GEN7_MEDIA_MAX_REQ_COUNT);
        s->gfx_max_req_count = intel_uncore_read(uncore, GEN7_GFX_MAX_REQ_COUNT);

        s->render_hwsp = intel_uncore_read(uncore, RENDER_HWS_PGA_GEN7);
        s->ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
        s->bsd_hwsp = intel_uncore_read(uncore, BSD_HWS_PGA_GEN7);
        s->blt_hwsp = intel_uncore_read(uncore, BLT_HWS_PGA_GEN7);

        s->tlb_rd_addr = intel_uncore_read(uncore, GEN7_TLB_RD_ADDR);

        /* MBC 0x9024-0x91D0, 0x8500 */
        s->g3dctl = intel_uncore_read(uncore, VLV_G3DCTL);
        s->gsckgctl = intel_uncore_read(uncore, VLV_GSCKGCTL);
        s->mbctl = intel_uncore_read(uncore, GEN6_MBCTL);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        s->ucgctl1 = intel_uncore_read(uncore, GEN6_UCGCTL1);
        s->ucgctl3 = intel_uncore_read(uncore, GEN6_UCGCTL3);
        s->rcgctl1 = intel_uncore_read(uncore, GEN6_RCGCTL1);
        s->rcgctl2 = intel_uncore_read(uncore, GEN6_RCGCTL2);
        s->rstctl = intel_uncore_read(uncore, GEN6_RSTCTL);
        s->misccpctl = intel_uncore_read(uncore, GEN7_MISCCPCTL);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        s->gfxpause = intel_uncore_read(uncore, GEN6_GFXPAUSE);
        s->rpdeuhwtc = intel_uncore_read(uncore, GEN6_RPDEUHWTC);
        s->rpdeuc = intel_uncore_read(uncore, GEN6_RPDEUC);
        s->ecobus = intel_uncore_read(uncore, ECOBUS);
        s->pwrdwnupctl = intel_uncore_read(uncore, VLV_PWRDWNUPCTL);
        s->rp_down_timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_TIMEOUT);
        s->rp_deucsw = intel_uncore_read(uncore, GEN6_RPDEUCSW);
        s->rcubmabdtmr = intel_uncore_read(uncore, GEN6_RCUBMABDTMR);
        s->rcedata = intel_uncore_read(uncore, VLV_RCEDATA);
        s->spare2gh = intel_uncore_read(uncore, VLV_SPAREG2H);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        s->gt_imr = intel_uncore_read(uncore, GTIMR);
        s->gt_ier = intel_uncore_read(uncore, GTIER);
        s->pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
        s->pm_ier = intel_uncore_read(uncore, GEN6_PMIER);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                s->gt_scratch[i] = intel_uncore_read(uncore, GEN7_GT_SCRATCH(i));

        /* GT SA CZ domain, 0x100000-0x138124 */
        s->tilectl = intel_uncore_read(uncore, TILECTL);
        s->gt_fifoctl = intel_uncore_read(uncore, GTFIFOCTL);
        s->gtlc_wake_ctrl = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
        s->gtlc_survive = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
        s->pmwgicz = intel_uncore_read(uncore, VLV_PMWGICZ);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        s->gu_ctl0 = intel_uncore_read(uncore, VLV_GU_CTL0);
        s->gu_ctl1 = intel_uncore_read(uncore, VLV_GU_CTL1);
        s->pcbr = intel_uncore_read(uncore, VLV_PCBR);
        s->clock_gate_dis2 = intel_uncore_read(uncore, VLV_GUNIT_CLOCK_GATE2);

        /*
         * Not saving any of:
         * DFT,  0x9800-0x9EC0
         * SARB, 0xB000-0xB1FC
         * GAC,  0x5208-0x524C, 0x14000-0x14C000
         * PCI CFG
         */
}

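/*
 * Write back the state captured by vlv_save_gunit_s0ix_state(). The GT
 * allow-wake and GFX force-clock bits are preserved rather than restored,
 * see the comment in the GT SA CZ section below.
 */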
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
{
        struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;
        int i;

        if (!s)
                return;

        /* GAM 0x4000-0x4770 */
        intel_uncore_write(uncore, GEN7_WR_WATERMARK, s->wr_watermark);
        intel_uncore_write(uncore, GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
        intel_uncore_write(uncore, ARB_MODE, s->arb_mode | (0xffff << 16));
        intel_uncore_write(uncore, GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
        intel_uncore_write(uncore, GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                intel_uncore_write(uncore, GEN7_LRA_LIMITS(i), s->lra_limits[i]);

        intel_uncore_write(uncore, GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
        intel_uncore_write(uncore, GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

        intel_uncore_write(uncore, RENDER_HWS_PGA_GEN7, s->render_hwsp);
        intel_uncore_write(uncore, GAM_ECOCHK, s->ecochk);
        intel_uncore_write(uncore, BSD_HWS_PGA_GEN7, s->bsd_hwsp);
        intel_uncore_write(uncore, BLT_HWS_PGA_GEN7, s->blt_hwsp);

        intel_uncore_write(uncore, GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

        /* MBC 0x9024-0x91D0, 0x8500 */
        intel_uncore_write(uncore, VLV_G3DCTL, s->g3dctl);
        intel_uncore_write(uncore, VLV_GSCKGCTL, s->gsckgctl);
        intel_uncore_write(uncore, GEN6_MBCTL, s->mbctl);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        intel_uncore_write(uncore, GEN6_UCGCTL1, s->ucgctl1);
        intel_uncore_write(uncore, GEN6_UCGCTL3, s->ucgctl3);
        intel_uncore_write(uncore, GEN6_RCGCTL1, s->rcgctl1);
        intel_uncore_write(uncore, GEN6_RCGCTL2, s->rcgctl2);
        intel_uncore_write(uncore, GEN6_RSTCTL, s->rstctl);
        intel_uncore_write(uncore, GEN7_MISCCPCTL, s->misccpctl);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        intel_uncore_write(uncore, GEN6_GFXPAUSE, s->gfxpause);
        intel_uncore_write(uncore, GEN6_RPDEUHWTC, s->rpdeuhwtc);
        intel_uncore_write(uncore, GEN6_RPDEUC, s->rpdeuc);
        intel_uncore_write(uncore, ECOBUS, s->ecobus);
        intel_uncore_write(uncore, VLV_PWRDWNUPCTL, s->pwrdwnupctl);
        intel_uncore_write(uncore, GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
        intel_uncore_write(uncore, GEN6_RPDEUCSW, s->rp_deucsw);
        intel_uncore_write(uncore, GEN6_RCUBMABDTMR, s->rcubmabdtmr);
        intel_uncore_write(uncore, VLV_RCEDATA, s->rcedata);
        intel_uncore_write(uncore, VLV_SPAREG2H, s->spare2gh);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        intel_uncore_write(uncore, GTIMR, s->gt_imr);
        intel_uncore_write(uncore, GTIER, s->gt_ier);
        intel_uncore_write(uncore, GEN6_PMIMR, s->pm_imr);
        intel_uncore_write(uncore, GEN6_PMIER, s->pm_ier);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                intel_uncore_write(uncore, GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

        /* GT SA CZ domain, 0x100000-0x138124 */
        intel_uncore_write(uncore, TILECTL, s->tilectl);
        intel_uncore_write(uncore, GTFIFOCTL, s->gt_fifoctl);
        /*
         * Preserve the GT allow-wake and GFX force-clock bits: they are not
         * restored here, since they are used by the caller to control the
         * s0ix suspend/resume sequence.
         */
        val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
        val &= VLV_GTLC_ALLOWWAKEREQ;
        val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
        intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);

        val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
        val &= VLV_GFX_CLK_FORCE_ON_BIT;
        val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
        intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);

        intel_uncore_write(uncore, VLV_PMWGICZ, s->pmwgicz);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        intel_uncore_write(uncore, VLV_GU_CTL0, s->gu_ctl0);
        intel_uncore_write(uncore, VLV_GU_CTL1, s->gu_ctl1);
        intel_uncore_write(uncore, VLV_PCBR, s->pcbr);
        intel_uncore_write(uncore, VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}

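/*
 * Poll VLV_GTLC_PW_STATUS until (status & mask) == val, using a sleeping
 * wait since the HW dislikes this register being polled too frequently.
 */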
static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
                                  u32 mask, u32 val)
{
        i915_reg_t reg = VLV_GTLC_PW_STATUS;
        u32 reg_value;
        int ret;

        /* The HW does not like us polling for PW_STATUS frequently, so
         * use the sleeping loop rather than risk the busy spin within
         * intel_wait_for_register().
         *
         * Transitioning between RC6 states should be at most 2ms (see
         * valleyview_enable_rps) so use a 3ms timeout.
         */
        ret = wait_for(((reg_value =
                         intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
                       == val, 3);

        /* just trace the final value */
        trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

        return ret;
}

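/*
 * Force the GFX clock on (or drop the force) via the survivability
 * register; when forcing on, wait up to 20ms for the clock status bit.
 */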
static int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
{
        struct intel_uncore *uncore = &i915->uncore;
        u32 val;
        int err;

        val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
        val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
        if (force_on)
                val |= VLV_GFX_CLK_FORCE_ON_BIT;
        intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);

        if (!force_on)
                return 0;

        err = intel_wait_for_register(uncore,
                                      VLV_GTLC_SURVIVABILITY_REG,
                                      VLV_GFX_CLK_STATUS_BIT,
                                      VLV_GFX_CLK_STATUS_BIT,
                                      20);
        if (err)
                drm_err(&i915->drm,
                        "timeout waiting for GFX clock force-on (%08x)\n",
                        intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG));

        return err;
}

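/*
 * Request or withdraw the GT allow-wake and wait for the matching
 * ALLOWWAKEACK status before returning.
 */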
static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
{
        struct intel_uncore *uncore = &i915->uncore;
        u32 mask;
        u32 val;
        int err;

        val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
        val &= ~VLV_GTLC_ALLOWWAKEREQ;
        if (allow)
                val |= VLV_GTLC_ALLOWWAKEREQ;
        intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
        intel_uncore_posting_read(uncore, VLV_GTLC_WAKE_CTRL);

        mask = VLV_GTLC_ALLOWWAKEACK;
        val = allow ? mask : 0;

        err = vlv_wait_for_pw_status(i915, mask, val);
        if (err)
                drm_err(&i915->drm, "timeout disabling GT waking\n");

        return err;
}

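/*
 * Wait for the render and media power wells to reach the requested
 * on/off state; a timeout is only reported at debug level.
 */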
static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
                                  bool wait_for_on)
{
        u32 mask;
        u32 val;

        mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
        val = wait_for_on ? mask : 0;

        /*
         * RC6 transitioning can be delayed up to 2 msec (see
         * valleyview_enable_rps), use 3 msec for safety.
         *
         * This can fail to turn off the rc6 if the GPU is stuck after a failed
         * reset and we are trying to force the machine to sleep.
         */
        if (vlv_wait_for_pw_status(dev_priv, mask, val))
                drm_dbg(&dev_priv->drm,
                        "timeout waiting for GT wells to go %s\n",
                        str_on_off(wait_for_on));
}

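/*
 * Report (at debug level) and clear the error flag raised when a GT
 * register was accessed while GT waking was disallowed.
 */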
static void vlv_check_no_gt_access(struct drm_i915_private *i915)
{
        struct intel_uncore *uncore = &i915->uncore;

        if (!(intel_uncore_read(uncore, VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
                return;

        drm_dbg(&i915->drm, "GT register access while GT waking disabled\n");
        intel_uncore_write(uncore, VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

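/*
 * Suspend-side entry point: force the GFX clock on, disallow GT wake,
 * save the Gunit state and drop the clock force again. On failure the
 * error paths re-enable waking and release the clock force.
 */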
int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
        u32 mask;
        int err;

        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                return 0;

        /*
         * Bspec defines the following GT well on flags as debug only, so
         * don't treat them as hard failures.
         */
        vlv_wait_for_gt_wells(dev_priv, false);

        mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
        drm_WARN_ON(&dev_priv->drm,
                    (intel_uncore_read(&dev_priv->uncore, VLV_GTLC_WAKE_CTRL) & mask) != mask);

        vlv_check_no_gt_access(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, true);
        if (err)
                goto err1;

        err = vlv_allow_gt_wake(dev_priv, false);
        if (err)
                goto err2;

        vlv_save_gunit_s0ix_state(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, false);
        if (err)
                goto err2;

        return 0;

err2:
        /* For safety always re-enable waking and disable gfx clock forcing */
        vlv_allow_gt_wake(dev_priv, true);
err1:
        vlv_force_gfx_clock(dev_priv, false);

        return err;
}

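/*
 * Resume-side counterpart of vlv_suspend_complete(): restore the Gunit
 * state with the GFX clock forced on, re-allow GT wake and, on runtime
 * resume, re-apply the clock gating setup. All steps are attempted even
 * if one fails; the first error is the one returned.
 */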
int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume)
{
        int err;
        int ret;

        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                return 0;

        /*
         * If any of the steps fail just try to continue, that's the best we
         * can do at this point. Return the first error code (which will also
         * leave RPM permanently disabled).
         */
        ret = vlv_force_gfx_clock(dev_priv, true);

        vlv_restore_gunit_s0ix_state(dev_priv);

        err = vlv_allow_gt_wake(dev_priv, true);
        if (!ret)
                ret = err;

        err = vlv_force_gfx_clock(dev_priv, false);
        if (!ret)
                ret = err;

        vlv_check_no_gt_access(dev_priv);

        if (rpm_resume)
                intel_init_clock_gating(dev_priv);

        return ret;
}

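/* Allocate the s0ix register snapshot; it is only allocated on Valleyview. */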
int vlv_suspend_init(struct drm_i915_private *i915)
{
        if (!IS_VALLEYVIEW(i915))
                return 0;

        /* we write all the values in the struct, so no need to zero it out */
        i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
                                       GFP_KERNEL);
        if (!i915->vlv_s0ix_state)
                return -ENOMEM;

        return 0;
}

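/* Free the snapshot allocated by vlv_suspend_init(), if any. */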
void vlv_suspend_cleanup(struct drm_i915_private *i915)
{
        if (!i915->vlv_s0ix_state)
                return;

        kfree(i915->vlv_s0ix_state);
        i915->vlv_s0ix_state = NULL;
}