1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * ARM-specific support for Broadcom STB S2/S3/S5 power management
4  *
5  * S2: clock gate CPUs and as many peripherals as possible
 * S3: power off all of the chip except the Always ON (AON) island; keep DDR in
 *     self-refresh
8  * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
9  *     treat this mode like a soft power-off, with wakeup allowed from AON
10  *
11  * Copyright © 2014-2017 Broadcom
12  */
13 
14 #define pr_fmt(fmt) "brcmstb-pm: " fmt
15 
16 #include <linux/bitops.h>
17 #include <linux/compiler.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/err.h>
21 #include <linux/init.h>
22 #include <linux/io.h>
23 #include <linux/ioport.h>
24 #include <linux/kconfig.h>
25 #include <linux/kernel.h>
26 #include <linux/memblock.h>
27 #include <linux/module.h>
28 #include <linux/notifier.h>
29 #include <linux/of.h>
30 #include <linux/of_address.h>
31 #include <linux/panic_notifier.h>
32 #include <linux/platform_device.h>
33 #include <linux/pm.h>
34 #include <linux/printk.h>
35 #include <linux/proc_fs.h>
36 #include <linux/sizes.h>
37 #include <linux/slab.h>
38 #include <linux/sort.h>
39 #include <linux/suspend.h>
40 #include <linux/types.h>
41 #include <linux/uaccess.h>
42 #include <linux/soc/brcmstb/brcmstb.h>
43 
44 #include <asm/fncpy.h>
45 #include <asm/setup.h>
46 #include <asm/suspend.h>
47 
48 #include "pm.h"
49 #include "aon_defs.h"
50 
#define SHIMPHY_DDR_PAD_CNTRL		0x8c

/* Method #0: bit-flag based S3 entry sequencing in the SHIMPHY pad control */
#define SHIMPHY_PAD_PLL_SEQUENCE	BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3		BIT(9)

/* Method #1: 4-bit power-down sequence selector (S3_PWRDWN_SEQ field) */
#define PWRDWN_SEQ_NO_SEQUENCING	0
#define PWRDWN_SEQ_HOLD_CHANNEL		1
#define	PWRDWN_SEQ_RESET_PLL		2
#define PWRDWN_SEQ_POWERDOWN_PLL	3

#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK	0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT	20

/* Bits in the DDR PHY STANDBY_CONTROL registers (used for S5 entry) */
#define	DDR_FORCE_CKE_RST_N		BIT(3)
#define	DDR_PHY_RST_N			BIT(2)
#define	DDR_PHY_CKE			BIT(1)

/* Sentinel offset: this PHY has no channel B standby-control register */
#define	DDR_PHY_NO_CHANNEL		0xffffffff

/* Maximum number of DDR memory controllers supported by this driver */
#define MAX_NUM_MEMC			3
73 
/* Register mappings kept per DDR memory controller */
struct brcmstb_memc {
	void __iomem *ddr_phy_base;	/* DDR PHY control registers */
	void __iomem *ddr_shimphy_base;	/* DDR SHIM-PHY pad control */
	void __iomem *ddr_ctrl;		/* DDR sequencer/controller registers */
};
79 
/* Driver-wide state, filled in once at probe time (single instance: ctrl) */
struct brcmstb_pm_control {
	void __iomem *aon_ctrl_base;	/* AON control registers */
	void __iomem *aon_sram;		/* AON system data RAM */
	struct brcmstb_memc memcs[MAX_NUM_MEMC];

	void __iomem *boot_sram;	/* executable on-chip boot SRAM */
	size_t boot_sram_len;

	bool support_warm_boot;		/* S3 (warm boot) supported by PHY? */
	size_t pll_status_offset;	/* PLL status register offset in PHY */
	int num_memc;			/* number of SHIM-PHYs mapped */

	struct brcmstb_s3_params *s3_params;	/* block handed to bootloader */
	dma_addr_t s3_params_pa;	/* DMA address of s3_params */
	int s3entry_method;		/* S3 entry method: 0 or 1 */
	u32 warm_boot_offset;		/* MEMC WARM_BOOT register offset */
	u32 phy_a_standby_ctrl_offs;	/* PHY channel A standby control */
	u32 phy_b_standby_ctrl_offs;	/* channel B, or DDR_PHY_NO_CHANNEL */
	bool needs_ddr_pad;		/* SHIMPHY pad sequencing required? */
	struct platform_device *pdev;	/* backing platform device */
};
101 
/* Commands sent to the Broadcom Security Processor via AON_CTRL_PM_INITIATE */
enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};

/* PM_INITIATE handshake bit and BSP status codes (low byte of the register) */
#define PM_INITIATE		0x01
#define PM_INITIATE_SUCCESS	0x00
#define PM_INITIATE_FAIL	0xfe

static struct brcmstb_pm_control ctrl;

/* Non-static: called by name from the inline asm in brcmstb_pm_do_s3() */
noinline int brcmstb_pm_s3_finish(void);

/* Entry point of the S2 routine once it has been copied into boot SRAM */
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
		void __iomem *ddr_phy_pll_status);
119 
brcmstb_init_sram(struct device_node * dn)120 static int brcmstb_init_sram(struct device_node *dn)
121 {
122 	void __iomem *sram;
123 	struct resource res;
124 	int ret;
125 
126 	ret = of_address_to_resource(dn, 0, &res);
127 	if (ret)
128 		return ret;
129 
130 	/* Uncached, executable remapping of SRAM */
131 	sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
132 	if (!sram)
133 		return -ENOMEM;
134 
135 	ctrl.boot_sram = sram;
136 	ctrl.boot_sram_len = resource_size(&res);
137 
138 	return 0;
139 }
140 
/* On-chip SRAM node; standby code runs from here while DDR is unavailable */
static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ /* sentinel */ }
};
145 
/*
 * Issue @cmd to the BSP through the AON_CTRL_PM_INITIATE mailbox and wait
 * (up to ~1 second) for it to complete.
 *
 * Returns 0 when the BSP reports PM_INITIATE_SUCCESS, non-zero otherwise.
 */
static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	int ret;
	int timeo = 1000 * 1000; /* 1 second */

	/* Clear the mailbox; the dummy read-back orders the write */
	writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
	(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

	/* Go! */
	writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

	/*
	 * If firmware doesn't support the 'ack', then just assume it's done
	 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
	 */
	if (of_machine_is_compatible("brcm,bcm74371a0")) {
		(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
		mdelay(10);
		return 0;
	}

	/* Poll in 50us steps until the BSP clears the PM_INITIATE bit */
	for (;;) {
		ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
		if (!(ret & PM_INITIATE))
			break;
		if (timeo <= 0) {
			pr_err("error: timeout waiting for BSP (%x)\n", ret);
			break;
		}
		timeo -= 50;
		udelay(50);
	}

	/* The low byte of the register holds the BSP status code */
	return (ret & 0xff) != PM_INITIATE_SUCCESS;
}
182 
brcmstb_pm_handshake(void)183 static int brcmstb_pm_handshake(void)
184 {
185 	void __iomem *base = ctrl.aon_ctrl_base;
186 	u32 tmp;
187 	int ret;
188 
189 	/* BSP power handshake, v1 */
190 	tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
191 	tmp &= ~1UL;
192 	writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
193 	(void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
194 
195 	ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
196 	if (ret)
197 		pr_err("BSP handshake failed\n");
198 
199 	/*
200 	 * HACK: BSP may have internal race on the CLOCK_STOP command.
201 	 * Avoid touching the BSP for a few milliseconds.
202 	 */
203 	mdelay(3);
204 
205 	return ret;
206 }
207 
/*
 * Read-modify-write SHIMPHY_DDR_PAD_CNTRL on every MEMC: keep only the
 * bits selected by @mask, then OR in @value. No-op on chips that do not
 * need DDR pad sequencing.
 */
static inline void shimphy_set(u32 value, u32 mask)
{
	int memc;

	if (!ctrl.needs_ddr_pad)
		return;

	for (memc = 0; memc < ctrl.num_memc; memc++) {
		void __iomem *reg = ctrl.memcs[memc].ddr_shimphy_base +
				    SHIMPHY_DDR_PAD_CNTRL;

		writel_relaxed((readl_relaxed(reg) & mask) | value, reg);
	}
	wmb(); /* Complete sequence in order. */
}
226 
/*
 * Program the WARM_BOOT bit in every MEMC's DDR controller: set it for a
 * warm (S3) boot, clear it for a cold boot.
 */
static inline void ddr_ctrl_set(bool warmboot)
{
	int memc;

	for (memc = 0; memc < ctrl.num_memc; memc++) {
		void __iomem *reg = ctrl.memcs[memc].ddr_ctrl +
				    ctrl.warm_boot_offset;
		u32 val = readl_relaxed(reg);

		if (warmboot)
			val |= 1;
		else
			val &= ~1; /* Cold boot */
		writel_relaxed(val, reg);
	}
	/* Complete sequence in order */
	wmb();
}
246 
s3entry_method0(void)247 static inline void s3entry_method0(void)
248 {
249 	shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
250 		    0xffffffff);
251 }
252 
s3entry_method1(void)253 static inline void s3entry_method1(void)
254 {
255 	/*
256 	 * S3 Entry Sequence
257 	 * -----------------
258 	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
259 	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
260 	 */
261 	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
262 		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
263 		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
264 
265 	ddr_ctrl_set(true);
266 }
267 
s5entry_method1(void)268 static inline void s5entry_method1(void)
269 {
270 	int i;
271 
272 	/*
273 	 * S5 Entry Sequence
274 	 * -----------------
275 	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
276 	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
277 	 * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
278 	 *	   DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
279 	 */
280 	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
281 		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
282 		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
283 
284 	ddr_ctrl_set(false);
285 
286 	for (i = 0; i < ctrl.num_memc; i++) {
287 		u32 tmp;
288 
289 		/* Step 3: Channel A (RST_N = CKE = 0) */
290 		tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
291 				  ctrl.phy_a_standby_ctrl_offs);
292 		tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
293 		writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
294 			     ctrl.phy_a_standby_ctrl_offs);
295 
296 		/* Step 3: Channel B? */
297 		if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
298 			tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
299 					  ctrl.phy_b_standby_ctrl_offs);
300 			tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
301 			writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
302 				     ctrl.phy_b_standby_ctrl_offs);
303 		}
304 	}
305 	/* Must complete */
306 	wmb();
307 }
308 
/*
 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
 * into a low-power mode
 *
 * @base_cmd: PM_CTRL configuration word selecting the target state
 * @onewrite: when true a single PM_CTRL write starts the transition;
 *	      otherwise a second write with PM_PWR_DOWN is required
 */
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
	void __iomem *base = ctrl.aon_ctrl_base;

	/* Method-1 cold boot needs the full S5 DDR sequence first */
	if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
		s5entry_method1();

	/* pm_start_pwrdn transition 0->1 */
	writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

	if (!onewrite) {
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);

		writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
	}
	/* Wait here until the PMSM powers the CPU down */
	wfi();
}
331 
/* Support S5 cold boot out of "poweroff" */
static void brcmstb_pm_poweroff(void)
{
	brcmstb_pm_handshake();

	/* Clear magic S3 warm-boot value */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	(void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	/* Skip wait-for-interrupt signal; just use a countdown */
	writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
	(void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

	if (ctrl.s3entry_method == 1) {
		/* Select the PLL power-down sequence and force a cold boot */
		shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			     ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
		brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
		return; /* We should never actually get here */
	}

	brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}
356 
/*
 * Copy the routine at @fn (of @len bytes) into boot SRAM and return a
 * callable pointer to the copy, or NULL if it does not fit.
 */
static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
{
	unsigned int aligned_len = ALIGN(len, FNCPY_ALIGN);

	if (aligned_len > ctrl.boot_sram_len) {
		pr_err("standby code will not fit in SRAM\n");
		return NULL;
	}

	return fncpy(ctrl.boot_sram, fn, aligned_len);
}
368 
369 /*
370  * S2 suspend/resume picks up where we left off, so we must execute carefully
371  * from SRAM, in order to allow DDR to come back up safely before we continue.
372  */
brcmstb_pm_s2(void)373 static int brcmstb_pm_s2(void)
374 {
375 	/* A previous S3 can set a value hazardous to S2, so make sure. */
376 	if (ctrl.s3entry_method == 1) {
377 		shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
378 			    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
379 			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
380 		ddr_ctrl_set(false);
381 	}
382 
383 	brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
384 			brcmstb_pm_do_s2_sz);
385 	if (!brcmstb_pm_do_s2_sram)
386 		return -EINVAL;
387 
388 	return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
389 			ctrl.memcs[0].ddr_phy_base +
390 			ctrl.pll_status_offset);
391 }
392 
/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static because
 * it is referenced from brcmstb_pm_s3()
 */
noinline int brcmstb_pm_s3_finish(void)
{
	struct brcmstb_s3_params *params = ctrl.s3_params;
	dma_addr_t params_pa = ctrl.s3_params_pa;
	phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
	enum bsp_initiate_command cmd;
	u32 flags;

	/*
	 * Clear parameter structure, but not DTU area, which has already been
	 * filled in. We know DTU is at the end, so we can just subtract its
	 * size.
	 */
	memset(params, 0, sizeof(*params) - sizeof(params->dtu));

	flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	/* Keep only bootloader-owned bits, then request no-verify + rand key */
	flags &= S3_BOOTLOADER_RESERVED;
	flags |= S3_FLAG_NO_MEM_VERIFY;
	flags |= S3_FLAG_LOAD_RANDKEY;

	/*
	 * Load random / fixed key.
	 * NOTE(review): S3_FLAG_LOAD_RANDKEY was unconditionally OR'd in
	 * above, so the fixed-key branch is currently unreachable — confirm
	 * whether that is intentional.
	 */
	if (flags & S3_FLAG_LOAD_RANDKEY)
		cmd = BSP_GEN_RANDOM_KEY;
	else
		cmd = BSP_GEN_FIXED_KEY;
	if (do_bsp_initiate_command(cmd)) {
		pr_info("key loading failed\n");
		return -EIO;
	}

	params->magic = BRCMSTB_S3_MAGIC;
	params->reentry = reentry;

	/* No more writes to DRAM */
	flush_cache_all();

	flags |= BRCMSTB_S3_MAGIC_SHORT;

	/* Hand the warm-boot flags and parameter address to the bootloader */
	writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	writel_relaxed(lower_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_LOW);
	writel_relaxed(upper_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

	switch (ctrl.s3entry_method) {
	case 0:
		s3entry_method0();
		brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
		break;
	case 1:
		s3entry_method1();
		brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
		break;
	default:
		return -EINVAL;
	}

	/* Must have been interrupted from wfi()? */
	return -EINTR;
}
459 
/*
 * cpu_suspend() trampoline: switch to the SRAM stack at @sp, call
 * brcmstb_pm_s3_finish() on it, then restore the original stack pointer.
 * Hand-written asm so no references to the old (DRAM) stack are generated.
 */
static int brcmstb_pm_do_s3(unsigned long sp)
{
	unsigned long save_sp;
	int ret;

	asm volatile (
		"mov	%[save], sp\n"
		"mov	sp, %[new]\n"
		"bl	brcmstb_pm_s3_finish\n"
		"mov	%[ret], r0\n"
		"mov	%[new], sp\n"
		"mov	sp, %[save]\n"
		: [save] "=&r" (save_sp), [ret] "=&r" (ret)
		: [new] "r" (sp)
	);

	return ret;
}
478 
brcmstb_pm_s3(void)479 static int brcmstb_pm_s3(void)
480 {
481 	void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;
482 
483 	return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
484 }
485 
/*
 * Common S2/S3 entry: handshake with the BSP, then run the requested
 * standby flavour. @deep_standby selects S3 (true) vs S2 (false).
 */
static int brcmstb_pm_standby(bool deep_standby)
{
	int err;

	if (brcmstb_pm_handshake())
		return -EIO;

	err = deep_standby ? brcmstb_pm_s3() : brcmstb_pm_s2();
	if (err)
		pr_err("%s: standby failed\n", __func__);

	return err;
}
502 
/* suspend_ops .enter callback: dispatch the requested suspend state. */
static int brcmstb_pm_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return brcmstb_pm_standby(false);
	case PM_SUSPEND_MEM:
		return brcmstb_pm_standby(true);
	default:
		return -EINVAL;
	}
}
518 
/* suspend_ops .valid callback: S2 always; S3 only with warm-boot support. */
static int brcmstb_pm_valid(suspend_state_t state)
{
	if (state == PM_SUSPEND_STANDBY)
		return true;
	if (state == PM_SUSPEND_MEM)
		return ctrl.support_warm_boot;
	return false;
}
530 
/* Suspend operations: "standby" maps to S2, "mem" maps to S3 */
static const struct platform_suspend_ops brcmstb_pm_ops = {
	.enter		= brcmstb_pm_enter,
	.valid		= brcmstb_pm_valid,
};
535 
/* AON control block; also used as the platform driver match table */
static const struct of_device_id aon_ctrl_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-aon-ctrl" },
	{}
};
540 
/* Per-DDR-PHY-version data attached to ddr_phy_dt_ids entries */
struct ddr_phy_ofdata {
	bool supports_warm_boot;	/* S3 (warm boot) possible? */
	size_t pll_status_offset;	/* PLL status register offset */
	int s3entry_method;		/* S3 entry method: 0 or 1 */
	u32 warm_boot_offset;		/* MEMC WARM_BOOT register offset */
	u32 phy_a_standby_ctrl_offs;	/* channel A standby control offset */
	u32 phy_b_standby_ctrl_offs;	/* channel B, or DDR_PHY_NO_CHANNEL */
};
549 
/* DDR PHY v71.1: entry method 1, single-channel PHY */
static struct ddr_phy_ofdata ddr_phy_71_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x0c,
	.s3entry_method = 1,
	.warm_boot_offset = 0x2c,
	.phy_a_standby_ctrl_offs = 0x198,
	.phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

/* DDR PHY v72.0: entry method 1, dual-channel PHY */
static struct ddr_phy_ofdata ddr_phy_72_0 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x10,
	.s3entry_method = 1,
	.warm_boot_offset = 0x40,
	.phy_a_standby_ctrl_offs = 0x2a4,
	.phy_b_standby_ctrl_offs = 0x8a4
};

/* DDR PHY v225.1: entry method 0, no warm boot (S3 unavailable) */
static struct ddr_phy_ofdata ddr_phy_225_1 = {
	.supports_warm_boot = false,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

/* DDR PHY v240.1: entry method 0, warm boot supported */
static struct ddr_phy_ofdata ddr_phy_240_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};
579 
/* Supported DDR PHY revisions and their version-specific data */
static const struct of_device_id ddr_phy_dt_ids[] = {
	{
		.compatible = "brcm,brcmstb-ddr-phy-v71.1",
		.data = &ddr_phy_71_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v72.0",
		.data = &ddr_phy_72_0,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v225.1",
		.data = &ddr_phy_225_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v240.1",
		.data = &ddr_phy_240_1,
	},
	{
		/* Same as v240.1, for the registers we care about */
		.compatible = "brcm,brcmstb-ddr-phy-v240.2",
		.data = &ddr_phy_240_1,
	},
	{}
};
604 
/* Per-DDR-sequencer data attached to brcmstb_memc_of_match entries */
struct ddr_seq_ofdata {
	bool needs_ddr_pad;	/* SHIMPHY pad sequencing required? */
	u32 warm_boot_offset;	/* overrides the PHY-derived offset if set */
};

/* Rev b.2.2+ sequencers: no pad sequencing, fixed warm-boot offset */
static const struct ddr_seq_ofdata ddr_seq_b22 = {
	.needs_ddr_pad = false,
	.warm_boot_offset = 0x2c,
};

/* Default sequencer: SHIMPHY pad control sequencing required */
static const struct ddr_seq_ofdata ddr_seq = {
	.needs_ddr_pad = true,
};

/* DDR SHIM-PHY nodes, one per memory controller */
static const struct of_device_id ddr_shimphy_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
	{}
};
623 
/* Supported MEMC/DDR sequencer revisions and their per-revision data */
static const struct of_device_id brcmstb_memc_of_match[] = {
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
		.data = &ddr_seq,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr",
		.data = &ddr_seq,
	},
	{},
};
651 
brcmstb_ioremap_match(const struct of_device_id * matches,int index,const void ** ofdata)652 static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
653 					   int index, const void **ofdata)
654 {
655 	struct device_node *dn;
656 	const struct of_device_id *match;
657 
658 	dn = of_find_matching_node_and_match(NULL, matches, &match);
659 	if (!dn)
660 		return ERR_PTR(-EINVAL);
661 
662 	if (ofdata)
663 		*ofdata = match->data;
664 
665 	return of_io_request_and_map(dn, index, dn->full_name);
666 }
667 
/*
 * Panic notifier: stamp a magic value into AON SRAM so the bootloader can
 * tell a panic apart from a normal suspend on the next boot.
 */
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
		unsigned long action, void *data)
{
	writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

	return NOTIFY_DONE;
}

static struct notifier_block brcmstb_pm_panic_nb = {
	.notifier_call = brcmstb_pm_panic_notify,
};
679 
/*
 * Probe: map the AON control/SRAM, DDR PHY, SHIM-PHY and DDR sequencer
 * register blocks from the device tree, set up boot SRAM and the S3
 * parameter buffer, then register suspend ops, the poweroff handler and
 * a panic notifier. Unwinds all mappings via goto labels on failure.
 */
static int brcmstb_pm_probe(struct platform_device *pdev)
{
	const struct ddr_phy_ofdata *ddr_phy_data;
	const struct ddr_seq_ofdata *ddr_seq_data;
	const struct of_device_id *of_id = NULL;
	struct device_node *dn;
	void __iomem *base;
	int ret, i, s;

	/* AON ctrl registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
	if (IS_ERR(base)) {
		pr_err("error mapping AON_CTRL\n");
		ret = PTR_ERR(base);
		goto aon_err;
	}
	ctrl.aon_ctrl_base = base;

	/* AON SRAM registers; 's' records whether we own a separate mapping */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
	if (IS_ERR(base)) {
		/* Assume standard offset */
		ctrl.aon_sram = ctrl.aon_ctrl_base +
				     AON_CTRL_SYSTEM_DATA_RAM_OFS;
		s = 0;
	} else {
		ctrl.aon_sram = base;
		s = 1;
	}

	/* Clear any stale panic marker */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);

	/* DDR PHY registers */
	base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
				     (const void **)&ddr_phy_data);
	if (IS_ERR(base)) {
		pr_err("error mapping DDR PHY\n");
		ret = PTR_ERR(base);
		goto ddr_phy_err;
	}
	ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
	ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
	/* Only need DDR PHY 0 for now? */
	ctrl.memcs[0].ddr_phy_base = base;
	ctrl.s3entry_method = ddr_phy_data->s3entry_method;
	ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
	ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
	/*
	 * Slightly gross to use the PHY version to derive a MEMC offset,
	 * but that is the only versioned thing so far we can test for.
	 */
	ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;

	/* DDR SHIM-PHY registers */
	for_each_matching_node(dn, ddr_shimphy_dt_ids) {
		i = ctrl.num_memc;
		if (i >= MAX_NUM_MEMC) {
			of_node_put(dn);
			pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
			break;
		}

		base = of_io_request_and_map(dn, 0, dn->full_name);
		if (IS_ERR(base)) {
			of_node_put(dn);
			/* A missing SHIM-PHY is only fatal when S3 is possible */
			if (!ctrl.support_warm_boot)
				break;

			pr_err("error mapping DDR SHIMPHY %d\n", i);
			ret = PTR_ERR(base);
			goto ddr_shimphy_err;
		}
		ctrl.memcs[i].ddr_shimphy_base = base;
		ctrl.num_memc++;
	}

	/* Sequencer DRAM Param and Control Registers */
	i = 0;
	for_each_matching_node(dn, brcmstb_memc_of_match) {
		base = of_iomap(dn, 0);
		if (!base) {
			of_node_put(dn);
			pr_err("error mapping DDR Sequencer %d\n", i);
			ret = -ENOMEM;
			goto brcmstb_memc_err;
		}

		of_id = of_match_node(brcmstb_memc_of_match, dn);
		if (!of_id) {
			iounmap(base);
			of_node_put(dn);
			ret = -EINVAL;
			goto brcmstb_memc_err;
		}

		ddr_seq_data = of_id->data;
		ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
		/* Adjust warm boot offset based on the DDR sequencer */
		if (ddr_seq_data->warm_boot_offset)
			ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;

		ctrl.memcs[i].ddr_ctrl = base;
		i++;
	}

	pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
		ctrl.support_warm_boot, ctrl.s3entry_method,
		ctrl.warm_boot_offset);

	/* Boot SRAM that the S2/S3 entry code executes from */
	dn = of_find_matching_node(NULL, sram_dt_ids);
	if (!dn) {
		pr_err("SRAM not found\n");
		ret = -EINVAL;
		goto brcmstb_memc_err;
	}

	ret = brcmstb_init_sram(dn);
	of_node_put(dn);
	if (ret) {
		pr_err("error setting up SRAM for PM\n");
		goto brcmstb_memc_err;
	}

	ctrl.pdev = pdev;

	/* Parameter block handed to the bootloader for S3 warm boot */
	ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
	if (!ctrl.s3_params) {
		ret = -ENOMEM;
		goto s3_params_err;
	}
	ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
					   sizeof(*ctrl.s3_params),
					   DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
		pr_err("error mapping DMA memory\n");
		ret = -ENOMEM;
		goto out;
	}

	atomic_notifier_chain_register(&panic_notifier_list,
				       &brcmstb_pm_panic_nb);

	pm_power_off = brcmstb_pm_poweroff;
	suspend_set_ops(&brcmstb_pm_ops);

	return 0;

out:
	kfree(ctrl.s3_params);
s3_params_err:
	iounmap(ctrl.boot_sram);
brcmstb_memc_err:
	for (i--; i >= 0; i--)
		iounmap(ctrl.memcs[i].ddr_ctrl);
ddr_shimphy_err:
	for (i = 0; i < ctrl.num_memc; i++)
		iounmap(ctrl.memcs[i].ddr_shimphy_base);

	iounmap(ctrl.memcs[0].ddr_phy_base);
ddr_phy_err:
	iounmap(ctrl.aon_ctrl_base);
	if (s)
		iounmap(ctrl.aon_sram);
aon_err:
	pr_warn("PM: initialization failed with code %d\n", ret);

	return ret;
}
849 
static struct platform_driver brcmstb_pm_driver = {
	.driver = {
		.name	= "brcmstb-pm",
		.of_match_table = aon_ctrl_dt_ids,
	},
};

/* Use platform_driver_probe(): the AON block is never hot-plugged */
static int __init brcmstb_pm_init(void)
{
	return platform_driver_probe(&brcmstb_pm_driver,
				     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);
863