/*
 *	linux/arch/alpha/kernel/sys_titan.c
 *
 *	Copyright (C) 1995 David A Rusling
 *	Copyright (C) 1996, 1999 Jay A Estabrook
 *	Copyright (C) 1998, 1999 Richard Henderson
 *	Copyright (C) 1999, 2000 Jeff Wiedemeier
 *
 * Code supporting TITAN systems (EV6+TITAN), currently:
 *	Privateer
 *	Falcon
 *	Granite
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "err_impl.h"


/*
 * Titan generic
 */

/*
 * Titan supports up to 4 CPUs
 */
static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL };

/*
 * Mask is set (1) if enabled
 */
static unsigned long titan_cached_irq_mask;

/*
 * Need SMP-safe access to interrupt CSRs
 */
spinlock_t titan_irq_lock = SPIN_LOCK_UNLOCKED;

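/*
 * Push the cached interrupt enable mask out to the Titan's per-CPU
 * device interrupt mask (DIM) CSRs.
 */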
static void
titan_update_irq_hw(unsigned long mask)
{
	register titan_cchip *cchip = TITAN_cchip;
	unsigned long isa_enable = 1UL << 55;
	register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
	register unsigned long cpm = cpu_present_mask;
	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
	unsigned long mask0, mask1, mask2, mask3, dummy;

	mask &= ~isa_enable;
	mask0 = mask & titan_cpu_irq_affinity[0];
	mask1 = mask & titan_cpu_irq_affinity[1];
	mask2 = mask & titan_cpu_irq_affinity[2];
	mask3 = mask & titan_cpu_irq_affinity[3];

	if (bcpu == 0) mask0 |= isa_enable;
	else if (bcpu == 1) mask1 |= isa_enable;
	else if (bcpu == 2) mask2 |= isa_enable;
	else mask3 |= isa_enable;

	dim0 = &cchip->dim0.csr;
	dim1 = &cchip->dim1.csr;
	dim2 = &cchip->dim2.csr;
	dim3 = &cchip->dim3.csr;
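	/* Redirect writes for CPUs that are not present to a local
	   dummy so the unconditional stores below stay simple.  */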
	if ((cpm & 1) == 0) dim0 = &dummy;
	if ((cpm & 2) == 0) dim1 = &dummy;
	if ((cpm & 4) == 0) dim2 = &dummy;
	if ((cpm & 8) == 0) dim3 = &dummy;

	*dim0 = mask0;
	*dim1 = mask1;
	*dim2 = mask2;
	*dim3 = mask3;
	mb();
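	/* Read the DIMs back to ensure the writes have reached the chip.  */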
	*dim0;
	*dim1;
	*dim2;
	*dim3;
#else
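	/* Uniprocessor: only the boot CPU's DIM is in use.  */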
	volatile unsigned long *dimB;
	dimB = &cchip->dim0.csr;
	if (bcpu == 1) dimB = &cchip->dim1.csr;
	else if (bcpu == 2) dimB = &cchip->dim2.csr;
	else if (bcpu == 3) dimB = &cchip->dim3.csr;

	*dimB = mask | isa_enable;
	mb();
	*dimB;
#endif
}

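/*
 * Device IRQs are offset by 16 to leave room for the ISA IRQs,
 * so bit (irq - 16) of the cached mask corresponds to kernel IRQ <irq>.
 */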
static inline void
titan_enable_irq(unsigned int irq)
{
	spin_lock(&titan_irq_lock);
	titan_cached_irq_mask |= 1UL << (irq - 16);
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
}

static inline void
titan_disable_irq(unsigned int irq)
{
	spin_lock(&titan_irq_lock);
	titan_cached_irq_mask &= ~(1UL << (irq - 16));
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
}

static unsigned int
titan_startup_irq(unsigned int irq)
{
	titan_enable_irq(irq);
	return 0;	/* never anything pending */
}

static void
titan_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		titan_enable_irq(irq);
}

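/*
 * Called with titan_irq_lock held; updates only the per-CPU affinity
 * masks.  The caller pushes the result out to the hardware.
 */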
static void
titan_cpu_set_irq_affinity(unsigned int irq, unsigned long affinity)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		if (affinity & (1UL << cpu))
			titan_cpu_irq_affinity[cpu] |= 1UL << irq;
		else
			titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
	}
}

static void
titan_set_irq_affinity(unsigned int irq, unsigned long affinity)
{
	spin_lock(&titan_irq_lock);
	titan_cpu_set_irq_affinity(irq - 16, affinity);
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);
}

static void
titan_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n");
}

static void
titan_srm_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
	int irq;

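	/* SRM device vectors start at 0x800 and are spaced 0x10 apart.  */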
	irq = (vector - 0x800) >> 4;
	handle_irq(irq, regs);
}


static void __init
init_titan_irqs(struct hw_interrupt_type *ops, int imin, int imax)
{
	long i;
	for (i = imin; i <= imax; ++i) {
		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[i].handler = ops;
	}
}

static struct hw_interrupt_type titan_irq_type = {
	typename:	"TITAN",
	startup:	titan_startup_irq,
	shutdown:	titan_disable_irq,
	enable:		titan_enable_irq,
	disable:	titan_disable_irq,
	ack:		titan_disable_irq,
	end:		titan_end_irq,
	set_affinity:	titan_set_irq_affinity,
};

static void
titan_intr_nop(int irq, void *dev_id, struct pt_regs *regs)
{
	/*
	 * This is a NOP interrupt handler for the purposes of
	 * event counting -- just return.
	 */
}

static void __init
titan_init_irq(void)
{
	if (alpha_using_srm && !alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_srm_device_interrupt;
	if (!alpha_mv.device_interrupt)
		alpha_mv.device_interrupt = titan_device_interrupt;

	titan_update_irq_hw(0);

	init_titan_irqs(&titan_irq_type, 16, 63 + 16);
}

static void __init
titan_legacy_init_irq(void)
{
	/* init the legacy dma controller */
	outb(0, DMA1_RESET_REG);
	outb(0, DMA2_RESET_REG);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
	outb(0, DMA2_MASK_REG);

	/* init the legacy irq controller */
	init_i8259a_irqs();

	/* init the titan irqs */
	titan_init_irq();
}

void
titan_dispatch_irqs(u64 mask, struct pt_regs *regs)
{
	unsigned long vector;

	/*
	 * Mask down to those interrupts which are enabled on this processor.
	 */
	mask &= titan_cpu_irq_affinity[smp_processor_id()];

	/*
	 * Dispatch all requested interrupts.
	 */
	while (mask) {
		/* convert to SRM vector... priority is <63> -> <0> */
		__asm__("ctlz %1, %0" : "=r"(vector) : "r"(mask));
		vector = 63 - vector;
		mask &= ~(1UL << vector);	/* clear it out */
		vector = 0x900 + (vector << 4);	/* convert to SRM vector */

		/* dispatch it */
		alpha_mv.device_interrupt(vector, regs);
	}
}


/*
 * Titan Family
 */
static void __init
titan_late_init(void)
{
	/*
	 * Enable the system error interrupts. These interrupts are
	 * all reported to the kernel as machine checks, so the handler
	 * is a nop whose only purpose is to let the events be counted.
	 */
	request_irq(63+16, titan_intr_nop, SA_INTERRUPT,
		    "CChip Error", NULL);
	request_irq(62+16, titan_intr_nop, SA_INTERRUPT,
		    "PChip 0 H_Error", NULL);
	request_irq(61+16, titan_intr_nop, SA_INTERRUPT,
		    "PChip 1 H_Error", NULL);
	request_irq(60+16, titan_intr_nop, SA_INTERRUPT,
		    "PChip 0 C_Error", NULL);
	request_irq(59+16, titan_intr_nop, SA_INTERRUPT,
		    "PChip 1 C_Error", NULL);

	/*
	 * Register our error handlers.
	 */
	titan_register_error_handlers();

	/*
	 * Check if the console left us any error logs.
	 */
	cdl_check_console_data_log();
}

static int __devinit
titan_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	u8 intline;
	int irq;

	/* Get the current intline.  */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
	irq = intline;

	/* Is it explicitly routed through ISA?  */
	if ((irq & 0xF0) == 0xE0)
		return irq;

	/* Offset by 16 to make room for ISA interrupts 0 - 15.  */
	return irq + 16;
}

static void __init
titan_init_pci(void)
{
	/*
	 * This isn't really the right place, but there's some init
	 * that needs to be done after everything is basically up.
	 */
	titan_late_init();

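	/* Resources were already assigned by the console; probe only,
	   don't reassign anything.  */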
	pci_probe_only = 1;
	common_init_pci();
	SMC669_Init(0);
#ifdef CONFIG_VGA_HOSE
	locate_and_init_vga(NULL);
#endif
}


/*
 * Privateer
 */
static void __init
privateer_init_pci(void)
{
	/*
	 * Hook a couple of extra err interrupts that the
	 * common titan code won't.
	 */
	request_irq(53+16, titan_intr_nop, SA_INTERRUPT,
		    "NMI", NULL);
	request_irq(50+16, titan_intr_nop, SA_INTERRUPT,
		    "Temperature Warning", NULL);

	/*
	 * Finish with the common version.
	 */
	return titan_init_pci();
}


/*
 * The System Vectors.
 */
struct alpha_machine_vector titan_mv __initmv = {
	vector_name:		"TITAN",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TITAN_IO,
	DO_TITAN_BUS,
	machine_check:		titan_machine_check,
	max_dma_address:	ALPHA_MAX_DMA_ADDRESS,
	min_io_address:		DEFAULT_IO_BASE,
	min_mem_address:	DEFAULT_MEM_BASE,
	pci_dac_offset:		TITAN_DAC_OFFSET,

	nr_irqs:		80,	/* 64 + 16 */
	/* device_interrupt will be filled in by titan_init_irq */

	agp_info:		titan_agp_info,

	init_arch:		titan_init_arch,
	init_irq:		titan_legacy_init_irq,
	init_rtc:		common_init_rtc,
	init_pci:		titan_init_pci,

	kill_arch:		titan_kill_arch,
	pci_map_irq:		titan_map_irq,
	pci_swizzle:		common_swizzle,
};
ALIAS_MV(titan)

struct alpha_machine_vector privateer_mv __initmv = {
	vector_name:		"PRIVATEER",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_TITAN_IO,
	DO_TITAN_BUS,
	machine_check:		privateer_machine_check,
	max_dma_address:	ALPHA_MAX_DMA_ADDRESS,
	min_io_address:		DEFAULT_IO_BASE,
	min_mem_address:	DEFAULT_MEM_BASE,
	pci_dac_offset:		TITAN_DAC_OFFSET,

	nr_irqs:		80,	/* 64 + 16 */
	/* device_interrupt will be filled in by titan_init_irq */

	agp_info:		titan_agp_info,

	init_arch:		titan_init_arch,
	init_irq:		titan_legacy_init_irq,
	init_rtc:		common_init_rtc,
	init_pci:		privateer_init_pci,

	kill_arch:		titan_kill_arch,
	pci_map_irq:		titan_map_irq,
	pci_swizzle:		common_swizzle,
};
/* No alpha_mv alias for privateer since we compile it
   in unconditionally with titan; setup_arch knows how to cope.  */