/*
 * Versatile Express Core Tile Cortex A9x4 Support
 */
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/clkdev.h>

#include <asm/hardware/arm_timer.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/pmu.h>
#include <asm/smp_scu.h>
#include <asm/smp_twd.h>

#include <mach/ct-ca9x4.h>

#include <asm/hardware/timer-sp.h>

#include <asm/mach/map.h>
#include <asm/mach/time.h>

#include "core.h"

#include <mach/motherboard.h>

#include <plat/clcd.h>

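/*
 * Static mapping for the Cortex-A9 MPCore private peripheral block
 * (SCU, GIC interfaces and local timers).
 */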
static struct map_desc ct_ca9x4_io_desc[] __initdata = {
	{
		.virtual        = V2T_PERIPH,
		.pfn            = __phys_to_pfn(CT_CA9X4_MPIC),
		.length         = SZ_8K,
		.type           = MT_DEVICE,
	},
};

static void __init ct_ca9x4_map_io(void)
{
	iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}

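/*
 * The per-CPU TWD local timers live in the A9 MPCore private region and
 * are only registered when TWD support is built in.
 */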
#ifdef CONFIG_HAVE_ARM_TWD
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);

static void __init ca9x4_twd_init(void)
{
	int err = twd_local_timer_register(&twd_local_timer);
	if (err)
		pr_err("twd_local_timer_register failed %d\n", err);
}
#else
#define ca9x4_twd_init()	do {} while (0)
#endif

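/*
 * The GIC distributor and CPU interface are part of the MPCore private
 * region; Linux interrupt numbering for this GIC starts at 29.
 */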
static void __init ct_ca9x4_init_irq(void)
{
	gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
		 ioremap(A9_MPCORE_GIC_CPU, SZ_256));
	ca9x4_twd_init();
}

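/*
 * Route the motherboard FPGA video mux to this core tile site and select
 * the DVI output mode before the CLCD controller is enabled.
 */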
static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
{
	v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0);
	v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE_DB1, 2);
}

static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
{
	unsigned long framesize = 1024 * 768 * 2;

	fb->panel = versatile_clcd_get_panel("XVGA");
	if (!fb->panel)
		return -EINVAL;

	return versatile_clcd_setup_dma(fb, framesize);
}

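/* CLCD board data: 16bpp (RGB5551/RGB565) panel, DMA-backed framebuffer. */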
static struct clcd_board ct_ca9x4_clcd_data = {
	.name		= "CT-CA9X4",
	.caps		= CLCD_CAP_5551 | CLCD_CAP_565,
	.check		= clcdfb_check,
	.decode		= clcdfb_decode,
	.enable		= ct_ca9x4_clcd_enable,
	.setup		= ct_ca9x4_clcd_setup,
	.mmap		= versatile_clcd_mmap_dma,
	.remove		= versatile_clcd_remove_dma,
};

static AMBA_AHB_DEVICE(clcd, "ct:clcd", 0, CT_CA9X4_CLCDC, IRQ_CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data);
static AMBA_APB_DEVICE(dmc, "ct:dmc", 0, CT_CA9X4_DMC, IRQ_CT_CA9X4_DMC, NULL);
static AMBA_APB_DEVICE(smc, "ct:smc", 0, CT_CA9X4_SMC, IRQ_CT_CA9X4_SMC, NULL);
static AMBA_APB_DEVICE(gpio, "ct:gpio", 0, CT_CA9X4_GPIO, IRQ_CT_CA9X4_GPIO, NULL);

static struct amba_device *ct_ca9x4_amba_devs[] __initdata = {
	&clcd_device,
	&dmc_device,
	&smc_device,
	&gpio_device,
};

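/*
 * OSC1 on the core tile feeds the CLCD pixel clock; rate changes are
 * forwarded to the motherboard configuration controller.
 */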
static long ct_round(struct clk *clk, unsigned long rate)
{
	return rate;
}

static int ct_set(struct clk *clk, unsigned long rate)
{
	return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_DB1 | 1, rate);
}

static const struct clk_ops osc1_clk_ops = {
	.round	= ct_round,
	.set	= ct_set,
};

static struct clk osc1_clk = {
	.ops	= &osc1_clk_ops,
	.rate	= 24000000,
};

static struct clk ct_sp804_clk = {
	.rate	= 1000000,
};

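/* Clock bindings for the core tile CLCD and the two SP804 timer channels. */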
static struct clk_lookup lookups[] = {
	{	/* CLCD */
		.dev_id		= "ct:clcd",
		.clk		= &osc1_clk,
	}, {	/* SP804 timers */
		.dev_id		= "sp804",
		.con_id		= "ct-timer0",
		.clk		= &ct_sp804_clk,
	}, {	/* SP804 timers */
		.dev_id		= "sp804",
		.con_id		= "ct-timer1",
		.clk		= &ct_sp804_clk,
	},
};

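/* Each of the four Cortex-A9 cores has its own PMU overflow interrupt. */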
static struct resource pmu_resources[] = {
	[0] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU0,
		.end	= IRQ_CT_CA9X4_PMU_CPU0,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU1,
		.end	= IRQ_CT_CA9X4_PMU_CPU1,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU2,
		.end	= IRQ_CT_CA9X4_PMU_CPU2,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU3,
		.end	= IRQ_CT_CA9X4_PMU_CPU3,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device pmu_device = {
	.name		= "arm-pmu",
	.id		= ARM_PMU_DEVICE_CPU,
	.num_resources	= ARRAY_SIZE(pmu_resources),
	.resource	= pmu_resources,
};

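/*
 * The clock lookups must be registered before any of the devices above
 * are probed, hence init_early.
 */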
static void __init ct_ca9x4_init_early(void)
{
	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
}

static void __init ct_ca9x4_init(void)
{
	int i;

#ifdef CONFIG_CACHE_L2X0
	void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);

	/* set RAM latencies to 1 cycle for this core tile. */
	writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
	writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);

	l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif

	for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
		amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);

	platform_device_register(&pmu_device);
}

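/*
 * SMP bring-up: the number of available cores is read from the SCU
 * configuration register and used to populate the possible CPU map.
 */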
#ifdef CONFIG_SMP
static void __iomem *ct_ca9x4_scu_base __initdata;

static void __init ct_ca9x4_init_cpu_map(void)
{
	int i, ncores;

	ct_ca9x4_scu_base = ioremap(A9_MPCORE_SCU, SZ_128);
	if (WARN_ON(!ct_ca9x4_scu_base))
		return;

	ncores = scu_get_core_count(ct_ca9x4_scu_base);

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; ++i)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}

static void __init ct_ca9x4_smp_enable(unsigned int max_cpus)
{
	scu_enable(ct_ca9x4_scu_base);
}
#endif

struct ct_desc ct_ca9x4_desc __initdata = {
	.id		= V2M_CT_ID_CA9,
	.name		= "CA9x4",
	.map_io		= ct_ca9x4_map_io,
	.init_early	= ct_ca9x4_init_early,
	.init_irq	= ct_ca9x4_init_irq,
	.init_tile	= ct_ca9x4_init,
#ifdef CONFIG_SMP
	.init_cpu_map	= ct_ca9x4_init_cpu_map,
	.smp_enable	= ct_ca9x4_smp_enable,
#endif
};