1 /*
2  * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3  *		http://www.samsung.com
4  *
5  * Common Codes for EXYNOS
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/io.h>
16 #include <linux/device.h>
17 #include <linux/gpio.h>
18 #include <linux/sched.h>
19 #include <linux/serial_core.h>
20 #include <linux/of.h>
21 #include <linux/of_irq.h>
22 
23 #include <asm/proc-fns.h>
24 #include <asm/exception.h>
25 #include <asm/hardware/cache-l2x0.h>
26 #include <asm/hardware/gic.h>
27 #include <asm/mach/map.h>
28 #include <asm/mach/irq.h>
29 #include <asm/cacheflush.h>
30 
31 #include <mach/regs-irq.h>
32 #include <mach/regs-pmu.h>
33 #include <mach/regs-gpio.h>
34 #include <mach/pmu.h>
35 
36 #include <plat/cpu.h>
37 #include <plat/clock.h>
38 #include <plat/devs.h>
39 #include <plat/pm.h>
40 #include <plat/sdhci.h>
41 #include <plat/gpio-cfg.h>
42 #include <plat/adc-core.h>
43 #include <plat/fb-core.h>
44 #include <plat/fimc-core.h>
45 #include <plat/iic-core.h>
46 #include <plat/tv-core.h>
47 #include <plat/regs-serial.h>
48 
49 #include "common.h"
/*
 * PL310 L2 cache auxiliary control value/mask handed to l2x0_init() and
 * l2x0_of_init() in exynos4_l2x0_cache_init() below.
 */
#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff
52 
/* SoC name strings referenced by the cpu_ids[] table below */
static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";
static const char name_exynos5250[] = "EXYNOS5250";
57 
/* Forward declarations for the per-SoC hooks wired into cpu_ids[] */
static void exynos4_map_io(void);
static void exynos5_map_io(void);
static void exynos4_init_clocks(int xtal);
static void exynos5_init_clocks(int xtal);
static void exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no);
static int exynos_init(void);
64 
/*
 * SoC match table: s3c_init_cpu() compares the chip ID read in
 * exynos_init_io() against idcode/idmask and installs the matching
 * entry's map_io/clock/uart/init hooks.
 */
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= EXYNOS4210_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4210,
	}, {
		.idcode		= EXYNOS4212_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4212,
	}, {
		.idcode		= EXYNOS4412_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4412,
	}, {
		.idcode		= EXYNOS5250_SOC_ID,
		.idmask		= EXYNOS5_SOC_MASK,
		.map_io		= exynos5_map_io,
		.init_clocks	= exynos5_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos5250,
	},
};
100 
101 /* Initial IO mappings */
102 
/*
 * Minimal static mapping needed before SoC detection: only the chip-ID
 * block, so exynos_init_io() can read the CPU ID.
 */
static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
111 
/* Static device mappings common to all EXYNOS4 SoCs (installed by
 * exynos4_map_io()). */
static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
		.length		= SZ_128K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_L2CC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC1,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC1),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
195 
/* SYSRAM mapping for EXYNOS4210 rev 0 (different physical address from
 * later revisions — see exynos4_map_io()). */
static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
204 
/* SYSRAM mapping for all other EXYNOS4 chips/revisions */
static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
213 
/* Static device mappings for EXYNOS5250 (installed by exynos5_map_io()) */
static struct map_desc exynos5_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSRAM),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_CMU),
		.length		= 144 * SZ_1K,	/* CMU block is 144KiB, not a power of two */
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	},
};
277 
/* Machine restart hook for EXYNOS4: write 1 to the SWRESET register to
 * trigger a SoC-level software reset. 'mode' and 'cmd' are unused. */
void exynos4_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}
282 
/* Machine restart hook for EXYNOS5: same software-reset trigger, via the
 * EXYNOS5 register definition. */
void exynos5_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, EXYNOS_SWRESET);
}
287 
/*
 * exynos_init_io
 *
 * register the standard cpu IO areas
 */
293 
/*
 * Early IO setup entry point called from the machine's map_io hook.
 * Maps the chip-ID block, reads the SoC ID, then dispatches to the
 * matching cpu_ids[] entry (whose map_io installs the rest).
 */
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
	/* optional board-specific mappings supplied by the machine file */
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}
306 
/* EXYNOS4 map_io hook: install static mappings and rename platform
 * devices to their EXYNOS4 driver names before device registration. */
static void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	/* EXYNOS4210 rev 0 has SYSRAM at a different physical address */
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");
}
342 
/* EXYNOS5 map_io hook: install static mappings, point the shared i2c0
 * platform device at the EXYNOS5 addresses, and set driver names. */
static void __init exynos5_map_io(void)
{
	iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));

	/* Retarget the common s3c i2c0 device's MMIO and IRQ resources
	 * to their EXYNOS5 locations */
	s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0);
	s3c_device_i2c0.resource[0].end   = EXYNOS5_PA_IIC(0) + SZ_4K - 1;
	s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
	s3c_device_i2c0.resource[1].end   = EXYNOS5_IRQ_IIC;

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");
}
362 
/* EXYNOS4 clock init: register base/common clocks, then the variant
 * clocks for the specific EXYNOS4 SoC, then compute rates. */
static void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	/* 4212 and 4412 share a clock block distinct from 4210 */
	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
}
378 
/* EXYNOS5 clock init: register base/common clocks then compute rates. */
static void __init exynos5_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	exynos5_register_clocks();
	exynos5_setup_clocks();
}
389 
/* Register offsets within one IRQ-combiner group */
#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

/* Serializes reads of the combiner status register in the cascade handler */
static DEFINE_SPINLOCK(irq_controller_lock);
395 
/*
 * Per-combiner state: first Linux IRQ handled (irq_offset), the 8-bit
 * slice of the 32-bit group register owned by this combiner (irq_mask),
 * and the combiner's MMIO base.
 */
struct combiner_chip_data {
	unsigned int irq_offset;
	unsigned int irq_mask;
	void __iomem *base;
};

static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
403 
combiner_base(struct irq_data * data)404 static inline void __iomem *combiner_base(struct irq_data *data)
405 {
406 	struct combiner_chip_data *combiner_data =
407 		irq_data_get_irq_chip_data(data);
408 
409 	return combiner_data->base;
410 }
411 
combiner_mask_irq(struct irq_data * data)412 static void combiner_mask_irq(struct irq_data *data)
413 {
414 	u32 mask = 1 << (data->irq % 32);
415 
416 	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
417 }
418 
combiner_unmask_irq(struct irq_data * data)419 static void combiner_unmask_irq(struct irq_data *data)
420 {
421 	u32 mask = 1 << (data->irq % 32);
422 
423 	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
424 }
425 
/*
 * Chained handler for the GIC SPI feeding one combiner: read the group
 * status, isolate this combiner's 8-bit slice, and dispatch the lowest
 * pending input as its own Linux IRQ.
 */
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	/* The status register is shared by four combiners in a group,
	 * so serialize the read against other cascade handlers. */
	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	/* Lowest pending bit within the 32-bit group register */
	combiner_irq = __ffs(status);

	/* irq_offset & ~31 is the Linux IRQ base of this group */
	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
454 
/* irq_chip for interrupts routed through the combiner */
static struct irq_chip combiner_chip = {
	.name		= "COMBINER",
	.irq_mask	= combiner_mask_irq,
	.irq_unmask	= combiner_unmask_irq,
};
460 
combiner_cascade_irq(unsigned int combiner_nr,unsigned int irq)461 static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
462 {
463 	unsigned int max_nr;
464 
465 	if (soc_is_exynos5250())
466 		max_nr = EXYNOS5_MAX_COMBINER_NR;
467 	else
468 		max_nr = EXYNOS4_MAX_COMBINER_NR;
469 
470 	if (combiner_nr >= max_nr)
471 		BUG();
472 	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
473 		BUG();
474 	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
475 }
476 
/*
 * Initialize one IRQ combiner: record its state, mask all of its inputs
 * in hardware, and register its Linux IRQs with the combiner irq_chip.
 */
static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
			  unsigned int irq_start)
{
	unsigned int i;
	unsigned int max_nr;

	if (soc_is_exynos5250())
		max_nr = EXYNOS5_MAX_COMBINER_NR;
	else
		max_nr = EXYNOS4_MAX_COMBINER_NR;

	if (combiner_nr >= max_nr)
		BUG();

	combiner_data[combiner_nr].base = base;
	combiner_data[combiner_nr].irq_offset = irq_start;
	/* Each combiner owns an 8-bit slice of the 32-bit group register,
	 * selected by combiner_nr modulo 4 */
	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

	/* Disable all interrupts */

	__raw_writel(combiner_data[combiner_nr].irq_mask,
		     base + COMBINER_ENABLE_CLEAR);

	/* Setup the Linux IRQ subsystem */

	for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
				+ MAX_IRQ_IN_COMBINER; i++) {
		irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
		irq_set_chip_data(i, &combiner_data[combiner_nr]);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}
}
509 
#ifdef CONFIG_OF
/* DT match table for the interrupt controller; .data is the init hook
 * invoked by of_irq_init() */
static const struct of_device_id exynos4_dt_irq_match[] = {
	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
	{},
};
#endif
516 
/* EXYNOS4 IRQ init: bring up the GIC (statically or via DT), then the
 * IRQ combiners chained onto GIC SPIs. */
void __init exynos4_init_irq(void)
{
	int irq;
	unsigned int gic_bank_offset;

	/* Per-CPU GIC bank stride differs on EXYNOS4412 */
	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	if (!of_have_populated_dt())
		gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
#ifdef CONFIG_OF
	else
		of_irq_init(exynos4_dt_irq_match);
#endif

	/* Set up each combiner and cascade it from its GIC SPI */
	for (irq = 0; irq < EXYNOS4_MAX_COMBINER_NR; irq++) {

		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
				COMBINER_IRQ(irq, 0));
		combiner_cascade_irq(irq, IRQ_SPI(irq));
	}

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}
545 
/* EXYNOS5 IRQ init: GIC comes up via DT only, then the combiners are
 * chained onto GIC SPIs as on EXYNOS4. */
void __init exynos5_init_irq(void)
{
	int irq;

#ifdef CONFIG_OF
	of_irq_init(exynos4_dt_irq_match);
#endif

	for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) {
		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
				COMBINER_IRQ(irq, 0));
		combiner_cascade_irq(irq, IRQ_SPI(irq));
	}

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS5
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}
567 
/* Sysfs subsystems ("exynos4-core"/"exynos5-core") registered at
 * core_initcall time; exynos_init() hangs a device off the active one. */
struct bus_type exynos4_subsys = {
	.name		= "exynos4-core",
	.dev_name	= "exynos4-core",
};

struct bus_type exynos5_subsys = {
	.name		= "exynos5-core",
	.dev_name	= "exynos5-core",
};

static struct device exynos4_dev = {
	.bus	= &exynos4_subsys,
};

static struct device exynos5_dev = {
	.bus	= &exynos5_subsys,
};
585 
exynos_core_init(void)586 static int __init exynos_core_init(void)
587 {
588 	if (soc_is_exynos5250())
589 		return subsys_system_register(&exynos5_subsys, NULL);
590 	else
591 		return subsys_system_register(&exynos4_subsys, NULL);
592 }
593 core_initcall(exynos_core_init);
594 
#ifdef CONFIG_CACHE_L2X0
/*
 * Bring up the PL310 L2 cache on EXYNOS4 (EXYNOS5250 has no PL310 and
 * returns early). Tries DT init first; otherwise programs latencies,
 * prefetch and power control by hand before l2x0_init(). The saved
 * register copies are flushed to RAM so resume code can restore them
 * with caches off.
 */
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250())
		return 0;

	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		/* DT path succeeded: just publish the phys address of the
		 * saved-regs block for the resume path */
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	/* Only program the controller if it is not already enabled */
	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
				S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
				S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
				S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
				S5P_VA_L2CC + L2X0_POWER_CTRL);

		/* Push the saved state out to RAM for the resume path */
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
early_initcall(exynos4_l2x0_cache_init);
#endif
648 
/*
 * EXYNOS5250 L2 setup via cp15: disable the cache enable bit in SCTLR,
 * read-modify-write the L2 control register (p15, 1, c9, c0, 2), then
 * re-enable the cache.
 * NOTE(review): the OR'ed bits appear to set L2 data/tag RAM latencies —
 * confirm against the Cortex-A15 L2CTLR documentation.
 */
static int __init exynos5_l2_cache_init(void)
{
	unsigned int val;

	if (!soc_is_exynos5250())
		return 0;

	asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
		     "bic %0, %0, #(1 << 2)\n"	/* cache disable */
		     "mcr p15, 0, %0, c1, c0, 0\n"
		     "mrc p15, 1, %0, c9, c0, 2\n"
		     : "=r"(val));

	val |= (1 << 9) | (1 << 5) | (2 << 6) | (2 << 0);

	asm volatile("mcr p15, 1, %0, c9, c0, 2\n" : : "r"(val));
	asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
		     "orr %0, %0, #(1 << 2)\n"	/* cache enable */
		     "mcr p15, 0, %0, c1, c0, 0\n"
		     : : "r"(val));

	return 0;
}
early_initcall(exynos5_l2_cache_init);
673 
exynos_init(void)674 static int __init exynos_init(void)
675 {
676 	printk(KERN_INFO "EXYNOS: Initializing architecture\n");
677 
678 	if (soc_is_exynos5250())
679 		return device_register(&exynos5_dev);
680 	else
681 		return device_register(&exynos4_dev);
682 }
683 
684 /* uart registration process */
685 
exynos_init_uarts(struct s3c2410_uartcfg * cfg,int no)686 static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no)
687 {
688 	struct s3c2410_uartcfg *tcfg = cfg;
689 	u32 ucnt;
690 
691 	for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
692 		tcfg->has_fracval = 1;
693 
694 	if (soc_is_exynos5250())
695 		s3c24xx_init_uartdevs("exynos4210-uart", exynos5_uart_resources, cfg, no);
696 	else
697 		s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
698 }
699 
/* ioremap'd base of the external-interrupt (EINT) register block */
static void __iomem *exynos_eint_base;

/* Protects read-modify-write of the shared EINT mask/control registers */
static DEFINE_SPINLOCK(eint_lock);

/* Maps each EINT0..15 source interrupt to its IRQ_EINT(n) number; used
 * as handler data by exynos_irq_eint0_15() */
static unsigned int eint0_15_data[16];
705 
exynos4_irq_to_gpio(unsigned int irq)706 static inline int exynos4_irq_to_gpio(unsigned int irq)
707 {
708 	if (irq < IRQ_EINT(0))
709 		return -EINVAL;
710 
711 	irq -= IRQ_EINT(0);
712 	if (irq < 8)
713 		return EXYNOS4_GPX0(irq);
714 
715 	irq -= 8;
716 	if (irq < 8)
717 		return EXYNOS4_GPX1(irq);
718 
719 	irq -= 8;
720 	if (irq < 8)
721 		return EXYNOS4_GPX2(irq);
722 
723 	irq -= 8;
724 	if (irq < 8)
725 		return EXYNOS4_GPX3(irq);
726 
727 	return -EINVAL;
728 }
729 
exynos5_irq_to_gpio(unsigned int irq)730 static inline int exynos5_irq_to_gpio(unsigned int irq)
731 {
732 	if (irq < IRQ_EINT(0))
733 		return -EINVAL;
734 
735 	irq -= IRQ_EINT(0);
736 	if (irq < 8)
737 		return EXYNOS5_GPX0(irq);
738 
739 	irq -= 8;
740 	if (irq < 8)
741 		return EXYNOS5_GPX1(irq);
742 
743 	irq -= 8;
744 	if (irq < 8)
745 		return EXYNOS5_GPX2(irq);
746 
747 	irq -= 8;
748 	if (irq < 8)
749 		return EXYNOS5_GPX3(irq);
750 
751 	return -EINVAL;
752 }
753 
/* Source interrupt numbers for EINT0..15 on EXYNOS4; each gets its own
 * chained handler in exynos_init_irq_eint() */
static unsigned int exynos4_eint0_15_src_int[16] = {
	EXYNOS4_IRQ_EINT0,
	EXYNOS4_IRQ_EINT1,
	EXYNOS4_IRQ_EINT2,
	EXYNOS4_IRQ_EINT3,
	EXYNOS4_IRQ_EINT4,
	EXYNOS4_IRQ_EINT5,
	EXYNOS4_IRQ_EINT6,
	EXYNOS4_IRQ_EINT7,
	EXYNOS4_IRQ_EINT8,
	EXYNOS4_IRQ_EINT9,
	EXYNOS4_IRQ_EINT10,
	EXYNOS4_IRQ_EINT11,
	EXYNOS4_IRQ_EINT12,
	EXYNOS4_IRQ_EINT13,
	EXYNOS4_IRQ_EINT14,
	EXYNOS4_IRQ_EINT15,
};
772 
/* Source interrupt numbers for EINT0..15 on EXYNOS5 */
static unsigned int exynos5_eint0_15_src_int[16] = {
	EXYNOS5_IRQ_EINT0,
	EXYNOS5_IRQ_EINT1,
	EXYNOS5_IRQ_EINT2,
	EXYNOS5_IRQ_EINT3,
	EXYNOS5_IRQ_EINT4,
	EXYNOS5_IRQ_EINT5,
	EXYNOS5_IRQ_EINT6,
	EXYNOS5_IRQ_EINT7,
	EXYNOS5_IRQ_EINT8,
	EXYNOS5_IRQ_EINT9,
	EXYNOS5_IRQ_EINT10,
	EXYNOS5_IRQ_EINT11,
	EXYNOS5_IRQ_EINT12,
	EXYNOS5_IRQ_EINT13,
	EXYNOS5_IRQ_EINT14,
	EXYNOS5_IRQ_EINT15,
};
exynos_irq_eint_mask(struct irq_data * data)791 static inline void exynos_irq_eint_mask(struct irq_data *data)
792 {
793 	u32 mask;
794 
795 	spin_lock(&eint_lock);
796 	mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
797 	mask |= EINT_OFFSET_BIT(data->irq);
798 	__raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
799 	spin_unlock(&eint_lock);
800 }
801 
exynos_irq_eint_unmask(struct irq_data * data)802 static void exynos_irq_eint_unmask(struct irq_data *data)
803 {
804 	u32 mask;
805 
806 	spin_lock(&eint_lock);
807 	mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
808 	mask &= ~(EINT_OFFSET_BIT(data->irq));
809 	__raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
810 	spin_unlock(&eint_lock);
811 }
812 
/* irq_chip .irq_ack: clear the EINT's pending bit (write-1-to-clear) */
static inline void exynos_irq_eint_ack(struct irq_data *data)
{
	__raw_writel(EINT_OFFSET_BIT(data->irq),
		     EINT_PEND(exynos_eint_base, data->irq));
}
818 
/* irq_chip .irq_mask_ack: mask then acknowledge in one hook */
static void exynos_irq_eint_maskack(struct irq_data *data)
{
	exynos_irq_eint_mask(data);
	exynos_irq_eint_ack(data);
}
824 
exynos_irq_eint_set_type(struct irq_data * data,unsigned int type)825 static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
826 {
827 	int offs = EINT_OFFSET(data->irq);
828 	int shift;
829 	u32 ctrl, mask;
830 	u32 newvalue = 0;
831 
832 	switch (type) {
833 	case IRQ_TYPE_EDGE_RISING:
834 		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
835 		break;
836 
837 	case IRQ_TYPE_EDGE_FALLING:
838 		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
839 		break;
840 
841 	case IRQ_TYPE_EDGE_BOTH:
842 		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
843 		break;
844 
845 	case IRQ_TYPE_LEVEL_LOW:
846 		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
847 		break;
848 
849 	case IRQ_TYPE_LEVEL_HIGH:
850 		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
851 		break;
852 
853 	default:
854 		printk(KERN_ERR "No such irq type %d", type);
855 		return -EINVAL;
856 	}
857 
858 	shift = (offs & 0x7) * 4;
859 	mask = 0x7 << shift;
860 
861 	spin_lock(&eint_lock);
862 	ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->irq));
863 	ctrl &= ~mask;
864 	ctrl |= newvalue << shift;
865 	__raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq));
866 	spin_unlock(&eint_lock);
867 
868 	if (soc_is_exynos5250())
869 		s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
870 	else
871 		s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
872 
873 	return 0;
874 }
875 
/* irq_chip shared by all 32 external interrupts */
static struct irq_chip exynos_irq_eint = {
	.name		= "exynos-eint",
	.irq_mask	= exynos_irq_eint_mask,
	.irq_unmask	= exynos_irq_eint_unmask,
	.irq_mask_ack	= exynos_irq_eint_maskack,
	.irq_ack	= exynos_irq_eint_ack,
	.irq_set_type	= exynos_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};
887 
/*
 * exynos4_irq_demux_eint
 *
 * This function demuxes the IRQs from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask registers handle eight of them.
 */
static inline void exynos_irq_demux_eint(unsigned int start)
{
	unsigned int irq;

	u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
	u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));

	/* Only consider pending bits that are unmasked; each register
	 * covers eight EINT lines */
	status &= ~mask;
	status &= 0xff;

	/* Dispatch highest-numbered pending line first */
	while (status) {
		irq = fls(status) - 1;
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}
913 
/* Chained handler for the single interrupt carrying EINT16..31: demux
 * both eight-line groups */
static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	chained_irq_enter(chip, desc);
	exynos_irq_demux_eint(IRQ_EINT(16));
	exynos_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}
922 
/*
 * Chained handler for EINT0..15: each source interrupt maps to exactly
 * one IRQ_EINT(n) (stored as handler data), so mask/ack the parent and
 * forward to that single IRQ.
 */
static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	chip->irq_mask(&desc->irq_data);

	/* Some parent chips have no .irq_ack (e.g. when mask_ack is used) */
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	generic_handle_irq(*irq_data);

	chip->irq_unmask(&desc->irq_data);
	chained_irq_exit(chip, desc);
}
939 
/*
 * Set up the 32 external interrupts: map the EINT register block,
 * register all IRQ_EINT(0..31) with the EINT chip, chain EINT16..31
 * behind their shared demux interrupt, and chain each EINT0..15 behind
 * its dedicated source interrupt.
 * Returns 0 on success, -ENOMEM if the register block cannot be mapped.
 */
static int __init exynos_init_irq_eint(void)
{
	int irq;

	/* EINT registers live in a different GPIO block per family */
	if (soc_is_exynos5250())
		exynos_eint_base = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
	else
		exynos_eint_base = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);

	if (exynos_eint_base == NULL) {
		pr_err("unable to ioremap for EINT base address\n");
		return -ENOMEM;
	}

	for (irq = 0 ; irq <= 31 ; irq++) {
		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
	}

	/* EINT16..31 share one upstream interrupt */
	irq_set_chained_handler(EXYNOS_IRQ_EINT16_31, exynos_irq_demux_eint16_31);

	/* EINT0..15 each have a dedicated source interrupt; stash the
	 * target IRQ_EINT(n) as handler data for exynos_irq_eint0_15() */
	for (irq = 0 ; irq <= 15 ; irq++) {
		eint0_15_data[irq] = IRQ_EINT(irq);

		if (soc_is_exynos5250()) {
			irq_set_handler_data(exynos5_eint0_15_src_int[irq],
					     &eint0_15_data[irq]);
			irq_set_chained_handler(exynos5_eint0_15_src_int[irq],
						exynos_irq_eint0_15);
		} else {
			irq_set_handler_data(exynos4_eint0_15_src_int[irq],
					     &eint0_15_data[irq]);
			irq_set_chained_handler(exynos4_eint0_15_src_int[irq],
						exynos_irq_eint0_15);
		}
	}

	return 0;
}
arch_initcall(exynos_init_irq_eint);
981