// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM-specific support for Broadcom STB S2/S3/S5 power management
 *
 * S2: clock gate CPUs and as many peripherals as possible
 * S3: power off all of the chip except the Always ON (AON) island; keep DDR in
 * self-refresh
 * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
 * treat this mode like a soft power-off, with wakeup allowed from AON
 *
 * Copyright © 2014-2017 Broadcom
 */

#define pr_fmt(fmt) "brcmstb-pm: " fmt

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/proc_fs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/soc/brcmstb/brcmstb.h>

#include <asm/fncpy.h>
#include <asm/setup.h>
#include <asm/suspend.h>

#include "pm.h"
#include "aon_defs.h"

#define SHIMPHY_DDR_PAD_CNTRL		0x8c

/* Method #0 */
#define SHIMPHY_PAD_PLL_SEQUENCE	BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3		BIT(9)

/* Method #1 */
#define PWRDWN_SEQ_NO_SEQUENCING	0
#define PWRDWN_SEQ_HOLD_CHANNEL		1
#define PWRDWN_SEQ_RESET_PLL		2
#define PWRDWN_SEQ_POWERDOWN_PLL	3

#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK	0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT	20

#define DDR_FORCE_CKE_RST_N		BIT(3)
#define DDR_PHY_RST_N			BIT(2)
#define DDR_PHY_CKE			BIT(1)

#define DDR_PHY_NO_CHANNEL		0xffffffff

#define MAX_NUM_MEMC			3

struct brcmstb_memc {
	void __iomem *ddr_phy_base;
	void __iomem *ddr_shimphy_base;
	void __iomem *ddr_ctrl;
};

struct brcmstb_pm_control {
	void __iomem *aon_ctrl_base;
	void __iomem *aon_sram;
	struct brcmstb_memc memcs[MAX_NUM_MEMC];

	void __iomem *boot_sram;
	size_t boot_sram_len;

	bool support_warm_boot;
	size_t pll_status_offset;
	int num_memc;

	struct brcmstb_s3_params *s3_params;
	dma_addr_t s3_params_pa;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
	bool needs_ddr_pad;
	struct platform_device *pdev;
};

enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};

#define PM_INITIATE		0x01
#define PM_INITIATE_SUCCESS	0x00
#define PM_INITIATE_FAIL	0xfe

static struct brcmstb_pm_control ctrl;

noinline int brcmstb_pm_s3_finish(void);

static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
		void __iomem *ddr_phy_pll_status);

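/*
 * Map the boot SRAM described by 'dn'. This SRAM is used both as the
 * destination for the relocated S2 suspend routine and as the temporary
 * stack for the S3 finish sequence, once DRAM must no longer be touched.
 */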
static int brcmstb_init_sram(struct device_node *dn)
{
	void __iomem *sram;
	struct resource res;
	int ret;

	ret = of_address_to_resource(dn, 0, &res);
	if (ret)
		return ret;

	/* Uncached, executable remapping of SRAM */
	sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
	if (!sram)
		return -ENOMEM;

	ctrl.boot_sram = sram;
	ctrl.boot_sram_len = resource_size(&res);

	return 0;
}

static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ /* sentinel */ }
};

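/*
 * Issue a command to the Boot Security Processor (BSP): write the command
 * to AON_CTRL_PM_INITIATE with the PM_INITIATE bit set, then poll until
 * the BSP clears PM_INITIATE. The low byte of the register carries the
 * result (PM_INITIATE_SUCCESS on success).
 */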
static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	int ret;
	int timeo = 1000 * 1000; /* 1 second */

	writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
	(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

	/* Go! */
	writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

	/*
	 * If firmware doesn't support the 'ack', then just assume it's done
	 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
	 */
	if (of_machine_is_compatible("brcm,bcm74371a0")) {
		(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
		mdelay(10);
		return 0;
	}

	for (;;) {
		ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
		if (!(ret & PM_INITIATE))
			break;
		if (timeo <= 0) {
			pr_err("error: timeout waiting for BSP (%x)\n", ret);
			break;
		}
		timeo -= 50;
		udelay(50);
	}

	return (ret & 0xff) != PM_INITIATE_SUCCESS;
}

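/*
 * BSP power handshake, performed before entering any low-power state:
 * clear bit 0 of AON_CTRL_HOST_MISC_CMDS, then ask the BSP to stop clocks.
 */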
static int brcmstb_pm_handshake(void)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	u32 tmp;
	int ret;

	/* BSP power handshake, v1 */
	tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
	tmp &= ~1UL;
	writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
	(void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);

	ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
	if (ret)
		pr_err("BSP handshake failed\n");

	/*
	 * HACK: BSP may have internal race on the CLOCK_STOP command.
	 * Avoid touching the BSP for a few milliseconds.
	 */
	mdelay(3);

	return ret;
}

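/*
 * Read-modify-write SHIMPHY_DDR_PAD_CNTRL on every MEMC. Note the calling
 * convention: 'mask' selects the bits to *preserve*, so callers pass an
 * inverted mask (e.g. ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK) while 'value'
 * supplies the new bits. No-op on chips without DDR pad sequencing.
 */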
static inline void shimphy_set(u32 value, u32 mask)
{
	int i;

	if (!ctrl.needs_ddr_pad)
		return;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
		tmp = value | (tmp & mask);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
	}
	wmb(); /* Complete sequence in order. */
}

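/*
 * Set or clear the WARM_BOOT bit in each MEMC's DDR controller; this is
 * step 2 of the S3/S5 entry sequences below (1 = warm boot with DDR in
 * self-refresh, 0 = cold boot).
 */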
static inline void ddr_ctrl_set(bool warmboot)
{
	int i;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
				ctrl.warm_boot_offset);
		if (warmboot)
			tmp |= 1;
		else
			tmp &= ~1;	/* Cold boot */
		writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
				ctrl.warm_boot_offset);
	}
	/* Complete sequence in order */
	wmb();
}

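/*
 * Method #0: request PLL gating on S3 entry via the SHIM PHY pad control
 * bits (SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE).
 */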
static inline void s3entry_method0(void)
{
	shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
		    0xffffffff);
}

static inline void s3entry_method1(void)
{
	/*
	 * S3 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(true);
}

static inline void s5entry_method1(void)
{
	int i;

	/*
	 * S5 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
	 * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
	 *	   DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(false);

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		/* Step 3: Channel A (RST_N = CKE = 0) */
		tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
				    ctrl.phy_a_standby_ctrl_offs);
		tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
			       ctrl.phy_a_standby_ctrl_offs);

		/* Step 3: Channel B (if present) */
		if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
			tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
					    ctrl.phy_b_standby_ctrl_offs);
			tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
			writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
				       ctrl.phy_b_standby_ctrl_offs);
		}
	}
	/* Must complete */
	wmb();
}

/*
 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
 * into a low-power mode
 */
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
	void __iomem *base = ctrl.aon_ctrl_base;

	if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
		s5entry_method1();

	/* pm_start_pwrdn transition 0->1 */
	writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

	if (!onewrite) {
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);

		writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
	}
	wfi();
}

/* Support S5 cold boot out of "poweroff" */
static void brcmstb_pm_poweroff(void)
{
	brcmstb_pm_handshake();

	/* Clear magic S3 warm-boot value */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	(void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	/* Skip wait-for-interrupt signal; just use a countdown */
	writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
	(void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			     ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
		brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
		return; /* We should never actually get here */
	}

	brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}

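/*
 * Relocate a function into boot SRAM with fncpy(), which needs the length
 * rounded up to FNCPY_ALIGN. Returns the SRAM entry point, or NULL if the
 * code does not fit.
 */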
static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
{
	unsigned int size = ALIGN(len, FNCPY_ALIGN);

	if (ctrl.boot_sram_len < size) {
		pr_err("standby code will not fit in SRAM\n");
		return NULL;
	}

	return fncpy(ctrl.boot_sram, fn, size);
}

/*
 * S2 suspend/resume picks up where we left off, so we must execute carefully
 * from SRAM, in order to allow DDR to come back up safely before we continue.
 */
static int brcmstb_pm_s2(void)
{
	/* A previous S3 can set a value hazardous to S2, so make sure. */
	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
			    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
	}

	brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
			brcmstb_pm_do_s2_sz);
	if (!brcmstb_pm_do_s2_sram)
		return -EINVAL;

	return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
			ctrl.memcs[0].ddr_phy_base +
			ctrl.pll_status_offset);
}

/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static
 * because it is referenced from the inline assembly in brcmstb_pm_do_s3().
 */
noinline int brcmstb_pm_s3_finish(void)
{
	struct brcmstb_s3_params *params = ctrl.s3_params;
	dma_addr_t params_pa = ctrl.s3_params_pa;
	phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
	enum bsp_initiate_command cmd;
	u32 flags;

	/*
	 * Clear parameter structure, but not DTU area, which has already been
	 * filled in. We know DTU is at the end, so we can just subtract its
	 * size.
	 */
	memset(params, 0, sizeof(*params) - sizeof(params->dtu));

	flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	flags &= S3_BOOTLOADER_RESERVED;
	flags |= S3_FLAG_NO_MEM_VERIFY;
	flags |= S3_FLAG_LOAD_RANDKEY;

	/* Load random / fixed key */
	if (flags & S3_FLAG_LOAD_RANDKEY)
		cmd = BSP_GEN_RANDOM_KEY;
	else
		cmd = BSP_GEN_FIXED_KEY;
	if (do_bsp_initiate_command(cmd)) {
		pr_info("key loading failed\n");
		return -EIO;
	}

	params->magic = BRCMSTB_S3_MAGIC;
	params->reentry = reentry;

	/* No more writes to DRAM */
	flush_cache_all();

	flags |= BRCMSTB_S3_MAGIC_SHORT;

	writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	writel_relaxed(lower_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_LOW);
	writel_relaxed(upper_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

	switch (ctrl.s3entry_method) {
	case 0:
		s3entry_method0();
		brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
		break;
	case 1:
		s3entry_method1();
		brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
		break;
	default:
		return -EINVAL;
	}

	/* Must have been interrupted from wfi()? */
	return -EINTR;
}

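/*
 * Switch the stack pointer to the SRAM stack passed in 'sp', call
 * brcmstb_pm_s3_finish(), then restore the original stack pointer. Once
 * there are to be no more writes to DRAM, even stack accesses must come
 * from SRAM, hence the hand-rolled stack switch.
 */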
static int brcmstb_pm_do_s3(unsigned long sp)
{
	unsigned long save_sp;
	int ret;

	asm volatile (
		"mov	%[save], sp\n"
		"mov	sp, %[new]\n"
		"bl	brcmstb_pm_s3_finish\n"
		"mov	%[ret], r0\n"
		"mov	%[new], sp\n"
		"mov	sp, %[save]\n"
		: [save] "=&r" (save_sp), [ret] "=&r" (ret)
		: [new] "r" (sp)
	);

	return ret;
}

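/* Suspend with a temporary stack placed at the top of boot SRAM */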
static int brcmstb_pm_s3(void)
{
	void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;

	return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
}

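/*
 * Common entry point for S2 and S3 (deep standby): handshake with the BSP
 * first, then run the mode-specific suspend path.
 */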
static int brcmstb_pm_standby(bool deep_standby)
{
	int ret;

	if (brcmstb_pm_handshake())
		return -EIO;

	if (deep_standby)
		ret = brcmstb_pm_s3();
	else
		ret = brcmstb_pm_s2();
	if (ret)
		pr_err("%s: standby failed\n", __func__);

	return ret;
}

static int brcmstb_pm_enter(suspend_state_t state)
{
	int ret = -EINVAL;

	switch (state) {
	case PM_SUSPEND_STANDBY:
		ret = brcmstb_pm_standby(false);
		break;
	case PM_SUSPEND_MEM:
		ret = brcmstb_pm_standby(true);
		break;
	}

	return ret;
}

static int brcmstb_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return true;
	case PM_SUSPEND_MEM:
		return ctrl.support_warm_boot;
	default:
		return false;
	}
}

static const struct platform_suspend_ops brcmstb_pm_ops = {
	.enter		= brcmstb_pm_enter,
	.valid		= brcmstb_pm_valid,
};

static const struct of_device_id aon_ctrl_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-aon-ctrl" },
	{}
};

struct ddr_phy_ofdata {
	bool supports_warm_boot;
	size_t pll_status_offset;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
};

static struct ddr_phy_ofdata ddr_phy_71_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x0c,
	.s3entry_method = 1,
	.warm_boot_offset = 0x2c,
	.phy_a_standby_ctrl_offs = 0x198,
	.phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

static struct ddr_phy_ofdata ddr_phy_72_0 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x10,
	.s3entry_method = 1,
	.warm_boot_offset = 0x40,
	.phy_a_standby_ctrl_offs = 0x2a4,
	.phy_b_standby_ctrl_offs = 0x8a4
};

static struct ddr_phy_ofdata ddr_phy_225_1 = {
	.supports_warm_boot = false,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

static struct ddr_phy_ofdata ddr_phy_240_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

static const struct of_device_id ddr_phy_dt_ids[] = {
	{
		.compatible = "brcm,brcmstb-ddr-phy-v71.1",
		.data = &ddr_phy_71_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v72.0",
		.data = &ddr_phy_72_0,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v225.1",
		.data = &ddr_phy_225_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v240.1",
		.data = &ddr_phy_240_1,
	},
	{
		/* Same as v240.1, for the registers we care about */
		.compatible = "brcm,brcmstb-ddr-phy-v240.2",
		.data = &ddr_phy_240_1,
	},
	{}
};

struct ddr_seq_ofdata {
	bool needs_ddr_pad;
	u32 warm_boot_offset;
};

static const struct ddr_seq_ofdata ddr_seq_b22 = {
	.needs_ddr_pad = false,
	.warm_boot_offset = 0x2c,
};

static const struct ddr_seq_ofdata ddr_seq = {
	.needs_ddr_pad = true,
};

static const struct of_device_id ddr_shimphy_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
	{}
};

static const struct of_device_id brcmstb_memc_of_match[] = {
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
		.data = &ddr_seq,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr",
		.data = &ddr_seq,
	},
	{},
};

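/*
 * Find the first DT node matching 'matches', map its resource at 'index',
 * and optionally hand back the match data for that node via 'ofdata'.
 */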
static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
					   int index, const void **ofdata)
{
	struct device_node *dn;
	const struct of_device_id *match;

	dn = of_find_matching_node_and_match(NULL, matches, &match);
	if (!dn)
		return ERR_PTR(-EINVAL);

	if (ofdata)
		*ofdata = match->data;

	return of_io_request_and_map(dn, index, dn->full_name);
}
/*
 * The AON is a small domain in the SoC that can retain its state across
 * various system-wide sleep states and specific reset conditions; the
 * AON DATA RAM is a small RAM of a few words (< 1KB) which can store
 * persistent information across such events.
 *
 * The panic notifier below flags to the bootloader that a panic occurred,
 * so that it tries its best to preserve the DRAM contents holding that
 * buffer for recovery by the kernel, as opposed to wiping DRAM clean again.
 *
 * Reference: comment from Florian Fainelli, at
 * https://lore.kernel.org/lkml/781cafb0-8d06-8b56-907a-5175c2da196a@gmail.com
 */
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

	return NOTIFY_DONE;
}

static struct notifier_block brcmstb_pm_panic_nb = {
	.notifier_call = brcmstb_pm_panic_notify,
};

static int brcmstb_pm_probe(struct platform_device *pdev)
{
	const struct ddr_phy_ofdata *ddr_phy_data;
	const struct ddr_seq_ofdata *ddr_seq_data;
	const struct of_device_id *of_id = NULL;
	struct device_node *dn;
	void __iomem *base;
	int ret, i, s;

	/* AON ctrl registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
	if (IS_ERR(base)) {
		pr_err("error mapping AON_CTRL\n");
		ret = PTR_ERR(base);
		goto aon_err;
	}
	ctrl.aon_ctrl_base = base;

	/* AON SRAM registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
	if (IS_ERR(base)) {
		/* Assume standard offset */
		ctrl.aon_sram = ctrl.aon_ctrl_base +
				     AON_CTRL_SYSTEM_DATA_RAM_OFS;
		s = 0;
	} else {
		ctrl.aon_sram = base;
		s = 1;
	}

	writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);

	/* DDR PHY registers */
	base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
				     (const void **)&ddr_phy_data);
	if (IS_ERR(base)) {
		pr_err("error mapping DDR PHY\n");
		ret = PTR_ERR(base);
		goto ddr_phy_err;
	}
	ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
	ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
	/* Only need DDR PHY 0 for now? */
	ctrl.memcs[0].ddr_phy_base = base;
	ctrl.s3entry_method = ddr_phy_data->s3entry_method;
	ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
	ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
	/*
	 * Slightly gross to use the PHY version to pick a MEMC offset, but
	 * that is the only versioned thing we can test for so far.
	 */
	ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;

	/* DDR SHIM-PHY registers */
	for_each_matching_node(dn, ddr_shimphy_dt_ids) {
		i = ctrl.num_memc;
		if (i >= MAX_NUM_MEMC) {
			of_node_put(dn);
			pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
			break;
		}

		base = of_io_request_and_map(dn, 0, dn->full_name);
		if (IS_ERR(base)) {
			of_node_put(dn);
			if (!ctrl.support_warm_boot)
				break;

			pr_err("error mapping DDR SHIMPHY %d\n", i);
			ret = PTR_ERR(base);
			goto ddr_shimphy_err;
		}
		ctrl.memcs[i].ddr_shimphy_base = base;
		ctrl.num_memc++;
	}

	/* Sequencer DRAM Param and Control Registers */
	i = 0;
	for_each_matching_node(dn, brcmstb_memc_of_match) {
		base = of_iomap(dn, 0);
		if (!base) {
			of_node_put(dn);
			pr_err("error mapping DDR Sequencer %d\n", i);
			ret = -ENOMEM;
			goto brcmstb_memc_err;
		}

		of_id = of_match_node(brcmstb_memc_of_match, dn);
		if (!of_id) {
			iounmap(base);
			of_node_put(dn);
			ret = -EINVAL;
			goto brcmstb_memc_err;
		}

		ddr_seq_data = of_id->data;
		ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
		/* Adjust warm boot offset based on the DDR sequencer */
		if (ddr_seq_data->warm_boot_offset)
			ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;

		ctrl.memcs[i].ddr_ctrl = base;
		i++;
	}

	pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
		ctrl.support_warm_boot, ctrl.s3entry_method,
		ctrl.warm_boot_offset);

	dn = of_find_matching_node(NULL, sram_dt_ids);
	if (!dn) {
		pr_err("SRAM not found\n");
		ret = -EINVAL;
		goto brcmstb_memc_err;
	}

	ret = brcmstb_init_sram(dn);
	of_node_put(dn);
	if (ret) {
		pr_err("error setting up SRAM for PM\n");
		goto brcmstb_memc_err;
	}

	ctrl.pdev = pdev;

	ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
	if (!ctrl.s3_params) {
		ret = -ENOMEM;
		goto s3_params_err;
	}
	ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
					   sizeof(*ctrl.s3_params),
					   DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
		pr_err("error mapping DMA memory\n");
		ret = -ENOMEM;
		goto out;
	}

	atomic_notifier_chain_register(&panic_notifier_list,
				       &brcmstb_pm_panic_nb);

	pm_power_off = brcmstb_pm_poweroff;
	suspend_set_ops(&brcmstb_pm_ops);

	return 0;

out:
	kfree(ctrl.s3_params);
s3_params_err:
	iounmap(ctrl.boot_sram);
brcmstb_memc_err:
	for (i--; i >= 0; i--)
		iounmap(ctrl.memcs[i].ddr_ctrl);
ddr_shimphy_err:
	for (i = 0; i < ctrl.num_memc; i++)
		iounmap(ctrl.memcs[i].ddr_shimphy_base);

	iounmap(ctrl.memcs[0].ddr_phy_base);
ddr_phy_err:
	iounmap(ctrl.aon_ctrl_base);
	if (s)
		iounmap(ctrl.aon_sram);
aon_err:
	pr_warn("PM: initialization failed with code %d\n", ret);

	return ret;
}

static struct platform_driver brcmstb_pm_driver = {
	.driver = {
		.name	= "brcmstb-pm",
		.of_match_table = aon_ctrl_dt_ids,
	},
};

static int __init brcmstb_pm_init(void)
{
	return platform_driver_probe(&brcmstb_pm_driver,
				     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);