1 /*
2  *	linux/arch/alpha/kernel/core_titan.c
3  *
4  * Code common to all TITAN core logic chips.
5  */
6 
7 #include <linux/config.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/pci.h>
11 #include <linux/sched.h>
12 #include <linux/init.h>
13 #include <linux/vmalloc.h>
14 
15 #include <asm/hwrpb.h>
16 #include <asm/ptrace.h>
17 #include <asm/system.h>
18 #include <asm/smp.h>
19 #include <asm/pgalloc.h>
20 
21 #define __EXTERN_INLINE inline
22 #include <asm/io.h>
23 #include <asm/core_titan.h>
24 #undef __EXTERN_INLINE
25 
26 #include <linux/bootmem.h>
27 
28 #include "proto.h"
29 #include "pci_impl.h"
30 
31 /* Save Titan configuration data as the console had it set up.  */
32 
struct
{
	unsigned long wsba[4];	/* window space base address CSRs */
	unsigned long wsm[4];	/* window space mask CSRs */
	unsigned long tba[4];	/* translated base address CSRs */
} saved_config[4] __attribute__((common));	/* one slot per hose/port;
						   NOTE(review): "common"
						   linkage presumably to
						   merge duplicate defs --
						   confirm why it's needed */
39 
40 /*
41  * BIOS32-style PCI interface:
42  */
43 
#define DEBUG_MCHECK 0  /* 0 = minimum, 1 = debug, 2 = debug+dump */
45 #define DEBUG_CONFIG 0
46 
47 #if DEBUG_CONFIG
48 # define DBG_CFG(args)	printk args
49 #else
50 # define DBG_CFG(args)
51 #endif
52 
53 
54 /*
55  * Routines to access TIG registers.
56  */
57 static inline volatile unsigned long *
mk_tig_addr(int offset)58 mk_tig_addr(int offset)
59 {
60 	return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
61 }
62 
63 static inline u8
titan_read_tig(int offset,u8 value)64 titan_read_tig(int offset, u8 value)
65 {
66 	volatile unsigned long *tig_addr = mk_tig_addr(offset);
67 	return (u8)(*tig_addr & 0xff);
68 }
69 
70 static inline void
titan_write_tig(int offset,u8 value)71 titan_write_tig(int offset, u8 value)
72 {
73 	volatile unsigned long *tig_addr = mk_tig_addr(offset);
74 	*tig_addr = (unsigned long)value;
75 }
76 
77 
78 /*
 * Given a bus, device, and function number, compute the resulting
 * configuration space address accordingly.  It is therefore not safe
 * to have concurrent invocations of the configuration space access
 * routines, but there really shouldn't be any need for this.
84  *
85  * Note that all config space accesses use Type 1 address format.
86  *
87  * Note also that type 1 is determined by non-zero bus number.
88  *
89  * Type 1:
90  *
91  *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
92  *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
93  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
94  * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
95  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
96  *
97  *	31:24	reserved
 *	23:16	bus number (8 bits = 256 possible buses)
99  *	15:11	Device number (5 bits)
100  *	10:8	function number
101  *	 7:2	register number
102  *
103  * Notes:
104  *	The function number selects which function of a multi-function device
105  *	(e.g., SCSI and Ethernet).
106  *
107  *	The register selects a DWORD (32 bit) register offset.  Hence it
108  *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
109  *	bits.
110  */
111 
112 static int
mk_conf_addr(struct pci_dev * dev,int where,unsigned long * pci_addr,unsigned char * type1)113 mk_conf_addr(struct pci_dev *dev, int where, unsigned long *pci_addr,
114 	     unsigned char *type1)
115 {
116 	struct pci_controller *hose = dev->sysdata;
117 	unsigned long addr;
118 	u8 bus = dev->bus->number;
119 	u8 device_fn = dev->devfn;
120 
121 	DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
122 		 "pci_addr=0x%p, type1=0x%p)\n",
123 		 bus, device_fn, where, pci_addr, type1));
124 
125         if (hose->first_busno == bus)
126 		bus = 0;
127         *type1 = (bus != 0);
128 
129         addr = (bus << 16) | (device_fn << 8) | where;
130 	addr |= hose->config_space_base;
131 
132 	*pci_addr = addr;
133 	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
134 	return 0;
135 }
136 
137 static int
titan_read_config_byte(struct pci_dev * dev,int where,u8 * value)138 titan_read_config_byte(struct pci_dev *dev, int where, u8 *value)
139 {
140 	unsigned long addr;
141 	unsigned char type1;
142 
143 	if (mk_conf_addr(dev, where, &addr, &type1))
144 		return PCIBIOS_DEVICE_NOT_FOUND;
145 
146 	*value = __kernel_ldbu(*(vucp)addr);
147 	return PCIBIOS_SUCCESSFUL;
148 }
149 
150 static int
titan_read_config_word(struct pci_dev * dev,int where,u16 * value)151 titan_read_config_word(struct pci_dev *dev, int where, u16 *value)
152 {
153 	unsigned long addr;
154 	unsigned char type1;
155 
156 	if (mk_conf_addr(dev, where, &addr, &type1))
157 		return PCIBIOS_DEVICE_NOT_FOUND;
158 
159 	*value = __kernel_ldwu(*(vusp)addr);
160 	return PCIBIOS_SUCCESSFUL;
161 }
162 
163 static int
titan_read_config_dword(struct pci_dev * dev,int where,u32 * value)164 titan_read_config_dword(struct pci_dev *dev, int where, u32 *value)
165 {
166 	unsigned long addr;
167 	unsigned char type1;
168 
169 	if (mk_conf_addr(dev, where, &addr, &type1))
170 		return PCIBIOS_DEVICE_NOT_FOUND;
171 
172 	*value = *(vuip)addr;
173 	return PCIBIOS_SUCCESSFUL;
174 }
175 
176 static int
titan_write_config_byte(struct pci_dev * dev,int where,u8 value)177 titan_write_config_byte(struct pci_dev *dev, int where, u8 value)
178 {
179 	unsigned long addr;
180 	unsigned char type1;
181 
182 	if (mk_conf_addr(dev, where, &addr, &type1))
183 		return PCIBIOS_DEVICE_NOT_FOUND;
184 
185 	__kernel_stb(value, *(vucp)addr);
186 	mb();
187 	__kernel_ldbu(*(vucp)addr);
188 	return PCIBIOS_SUCCESSFUL;
189 }
190 
191 static int
titan_write_config_word(struct pci_dev * dev,int where,u16 value)192 titan_write_config_word(struct pci_dev *dev, int where, u16 value)
193 {
194 	unsigned long addr;
195 	unsigned char type1;
196 
197 	if (mk_conf_addr(dev, where, &addr, &type1))
198 		return PCIBIOS_DEVICE_NOT_FOUND;
199 
200 	__kernel_stw(value, *(vusp)addr);
201 	mb();
202 	__kernel_ldwu(*(vusp)addr);
203 	return PCIBIOS_SUCCESSFUL;
204 }
205 
206 static int
titan_write_config_dword(struct pci_dev * dev,int where,u32 value)207 titan_write_config_dword(struct pci_dev *dev, int where, u32 value)
208 {
209 	unsigned long addr;
210 	unsigned char type1;
211 
212 	if (mk_conf_addr(dev, where, &addr, &type1))
213 		return PCIBIOS_DEVICE_NOT_FOUND;
214 
215 	*(vuip)addr = value;
216 	mb();
217 	*(vuip)addr;
218 	return PCIBIOS_SUCCESSFUL;
219 }
220 
221 struct pci_ops titan_pci_ops =
222 {
223 	read_byte:	titan_read_config_byte,
224 	read_word:	titan_read_config_word,
225 	read_dword:	titan_read_config_dword,
226 	write_byte:	titan_write_config_byte,
227 	write_word:	titan_write_config_word,
228 	write_dword:	titan_write_config_dword
229 };
230 
231 
/*
 * Invalidate PCI TLB entries on the pachip port serving this hose for
 * the DMA range [start, end].  Hose index bit 0 selects pchip 0 vs. 1;
 * bit 1 selects the G (PCI) vs. A (AGP) port.
 */
void
titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	titan_pachip *pachip =
	  (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
	titan_pachip_port *port;
	volatile unsigned long *csr;
	unsigned long value;

	/* Get the right hose.  */
	port = &pachip->g_port;
	if (hose->index & 2)
		port = &pachip->a_port;

	/* We can invalidate up to 8 tlb entries in a go.  The flush
	   matches against <31:16> in the pci address.
	   Note that gtlbi* and atlbi* are in the same place in the g_port
	   and a_port, respectively, so the g_port offset can be used
	   even if hose is an a_port */
	csr = &port->port_specific.g.gtlbia.csr;	/* invalidate-all */
	if (((start ^ end) & 0xffff0000) == 0)
		csr = &port->port_specific.g.gtlbiv.csr; /* single tag */

	/* For TBIA, it doesn't matter what value we write.  For TBI,
	   it's the shifted tag bits.  */
	value = (start & 0xffff0000) >> 12;

	/* Order prior PTE updates before the invalidate write, then
	   read the CSR back so the write is known to have reached the
	   chip before we return.  */
	wmb();
	*csr = value;
	mb();
	*csr;
}
264 
265 static int
titan_query_agp(titan_pachip_port * port)266 titan_query_agp(titan_pachip_port *port)
267 {
268 	union TPAchipPCTL pctl;
269 
270 	/* set up APCTL */
271 	pctl.pctl_q_whole = port->pctl.csr;
272 
273 	return pctl.pctl_r_bits.apctl_v_agp_present;
274 
275 }
276 
/*
 * Bring up one PCI controller ("hose") on a pachip port: allocate the
 * hose and its I/O and MEM resources, record the PIO bases, save the
 * console's DMA window state for reboot, program the scatter-gather
 * and direct-map DMA windows, and flush the port's PCI TLB.
 */
static void __init
titan_init_one_pachip_port(titan_pachip_port *port, int index)
{
	struct pci_controller *hose;

	hose = alloc_pci_controller();
	if (index == 0)
		pci_isa_hose = hose;	/* hose 0 carries the ISA bridge */
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * This is for userland consumption.  The 40-bit PIO bias that we
	 * use in the kernel through KSEG doesn't work in the page table
	 * based user mappings. (43-bit KSEG sign extends the physical
	 * address from bit 40 to hit the I/O bit - mapped addresses don't).
	 * So make sure we get the 43-bit PIO bias.
	 */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (TITAN_MEM(index) & 0xffffffffff) | 0x80000000000;
	hose->dense_io_base
	  = (TITAN_IO(index) & 0xffffffffff) | 0x80000000000;

	hose->config_space_base = TITAN_CONF(index);
	hose->index = index;

	hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
	hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[index];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[index];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n", index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n", index);

	/*
	 * Save the existing PCI window translations.  SRM will
	 * need them when we go to reboot.
	 */
	saved_config[index].wsba[0] = port->wsba[0].csr;
	saved_config[index].wsm[0]  = port->wsm[0].csr;
	saved_config[index].tba[0]  = port->tba[0].csr;

	saved_config[index].wsba[1] = port->wsba[1].csr;
	saved_config[index].wsm[1]  = port->wsm[1].csr;
	saved_config[index].tba[1]  = port->tba[1].csr;

	saved_config[index].wsba[2] = port->wsba[2].csr;
	saved_config[index].wsm[2]  = port->wsm[2].csr;
	saved_config[index].tba[2]  = port->tba[2].csr;

	saved_config[index].wsba[3] = port->wsba[3].csr;
	saved_config[index].wsm[3]  = port->wsm[3].csr;
	saved_config[index].tba[3]  = port->tba[3].csr;

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 on Titan is Scatter-Gather ONLY.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather 1GB at 3GB
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_isa->align_entry = 8; /* 64KB for ISA */

	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
	hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */

	/* Low WSBA bits: direct windows use "| 1", SG windows "| 3" --
	   NOTE(review): presumably enable and scatter-gather bits;
	   confirm against the Titan CSR spec. */
	port->wsba[0].csr = hose->sg_isa->dma_base | 3;
	port->wsm[0].csr  = (hose->sg_isa->size - 1) & 0xfff00000;
	port->tba[0].csr  = virt_to_phys(hose->sg_isa->ptes);

	port->wsba[1].csr = __direct_map_base | 1;
	port->wsm[1].csr  = (__direct_map_size - 1) & 0xfff00000;
	port->tba[1].csr  = 0;

	port->wsba[2].csr = hose->sg_pci->dma_base | 3;
	port->wsm[2].csr  = (hose->sg_pci->size - 1) & 0xfff00000;
	port->tba[2].csr  = virt_to_phys(hose->sg_pci->ptes);

	port->wsba[3].csr = 0;	/* window 3 disabled */

	/* Enable the Monster Window to make DAC pci64 possible.  */
	port->pctl.csr |= pctl_m_mwin;

	/*
	 * If it's an AGP port, initialize agplastwr.
	 */
	if (titan_query_agp(port))
		port->port_specific.a.agplastwr.csr = __direct_map_base;

	/* Flush the whole PCI TLB now that the windows changed. */
	titan_pci_tbi(hose, 0, -1);
}
380 
/*
 * Initialize all present pachip ports.  Pchip 1 is present iff bit 14
 * of the Cchip CSC register is set; the hose numbering (g0, g1, a0,
 * a1) must match the console's.
 */
static void __init
titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
	int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

	/* Init the ports in hose order... */
	titan_init_one_pachip_port(&pachip0->g_port, 0);	/* hose 0 */
	if (pchip1_present)
		titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
	titan_init_one_pachip_port(&pachip0->a_port, 2);	/* hose 2 */
	if (pchip1_present)
		titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
}
394 
395 static void __init
titan_init_vga_hose(void)396 titan_init_vga_hose(void)
397 {
398 #ifdef CONFIG_VGA_HOSE
399 	u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
400 
401 	if (pu64[7] == 3) {	/* TERM_TYPE == graphics */
402 		struct pci_controller *hose;
403 		int h = (pu64[30] >> 24) & 0xff;	/* console hose # */
404 
405 		/*
406 		 * Our hose numbering matches the console's, so just find
407 		 * the right one...
408 		 */
409 		for (hose = hose_head; hose; hose = hose->next) {
410 			if (hose->index == h) break;
411 		}
412 
413 		if (hose) {
414 			printk("Console graphics on hose %d\n", hose->index);
415 			pci_vga_hose = hose;
416 		}
417 	}
418 #endif /* CONFIG_VGA_HOSE */
419 }
420 
/*
 * Titan core initialization: record the boot CPU, widen the resource
 * ranges (multiple PCI busses are addressed physically), establish
 * the direct-map DMA window, and bring up the pachips and the
 * optional VGA hose.
 */
void __init
titan_init_arch(void)
{
	/* Debug dump of C-chip and D-chip CSRs; flip to #if 1 to use. */
#if 0
	printk("%s: titan_init_arch()\n", __FUNCTION__);
	printk("%s: CChip registers:\n", __FUNCTION__);
	printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr);
	printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr);
	printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr);
	printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr);
	printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr);
	printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr);
	printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr);
	printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr);

	printk("%s: DChip registers:\n", __FUNCTION__);
	printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr);
	printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr);
	printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr);
#endif

	boot_cpuid = __hard_smp_processor_id();

	/* With multiple PCI busses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;
	iomem_resource.end = ~0UL;

	/* PCI DMA Direct Mapping is 1GB at 2GB.  */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Init the PA chip(s).  */
	titan_init_pachips(TITAN_pachip0, TITAN_pachip1);

	/* Check for graphic console location (if any).  */
	titan_init_vga_hose();
}
458 
459 static void
titan_kill_one_pachip_port(titan_pachip_port * port,int index)460 titan_kill_one_pachip_port(titan_pachip_port *port, int index)
461 {
462 	port->wsba[0].csr = saved_config[index].wsba[0];
463 	port->wsm[0].csr  = saved_config[index].wsm[0];
464 	port->tba[0].csr  = saved_config[index].tba[0];
465 
466 	port->wsba[1].csr = saved_config[index].wsba[1];
467 	port->wsm[1].csr  = saved_config[index].wsm[1];
468 	port->tba[1].csr  = saved_config[index].tba[1];
469 
470 	port->wsba[2].csr = saved_config[index].wsba[2];
471 	port->wsm[2].csr  = saved_config[index].wsm[2];
472 	port->tba[2].csr  = saved_config[index].tba[2];
473 
474 	port->wsba[3].csr = saved_config[index].wsba[3];
475 	port->wsm[3].csr  = saved_config[index].wsm[3];
476 	port->tba[3].csr  = saved_config[index].tba[3];
477 }
478 
479 static void
titan_kill_pachips(titan_pachip * pachip0,titan_pachip * pachip1)480 titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
481 {
482 	int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;
483 
484 	if (pchip1_present) {
485 		titan_kill_one_pachip_port(&pachip1->g_port, 1);
486 		titan_kill_one_pachip_port(&pachip1->a_port, 3);
487 	}
488 	titan_kill_one_pachip_port(&pachip0->g_port, 0);
489 	titan_kill_one_pachip_port(&pachip0->a_port, 2);
490 }
491 
/*
 * Shut down the Titan core logic: put the console's saved PCI window
 * translations back.  The 'mode' argument is unused here.
 */
void
titan_kill_arch(int mode)
{
	titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
}
497 
498 
499 /*
500  * IO map support.
501  */
/*
 * Map a bus address on some hose into kernel virtual space.
 * Direct-mapped and legacy addresses come back as KSEG (bias-based)
 * addresses; scatter-gather arena addresses get a real vmalloc-space
 * mapping built from the arena's PTEs.  Returns 0 on failure.
 */
unsigned long
titan_ioremap(unsigned long addr, unsigned long size)
{
	int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
	unsigned long baddr = addr & ~TITAN_HOSE_MASK;
	unsigned long last = baddr + size - 1;
	struct pci_controller *hose;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the addr.
	 */
#ifdef CONFIG_VGA_HOSE
	if (pci_vga_hose && __titan_is_mem_vga(addr)) {
		h = pci_vga_hose->index;
		addr += pci_vga_hose->mem_space->start;
	}
#endif

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == h) break;
	if (!hose) return (unsigned long)NULL;

	/*
	 * Is it direct-mapped?
	 *
	 * NOTE(review): the range test uses 'baddr' (hose bits masked
	 * off) while the return uses 'addr' -- presumably equal here;
	 * verify behavior for addresses carrying hose bits.
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size))
		return addr - __direct_map_base + TITAN_MEM_BIAS;

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){

		/*
		 * Adjust the limits (mappings must be page aligned)
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area) return (unsigned long)NULL;
		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		    baddr <= last;
		    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			/* Bit 0 of the arena PTE is the valid bit. */
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return (unsigned long)NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(VMALLOC_VMADDR(vaddr),
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				vfree(area->addr);
				return (unsigned long)NULL;
			}
		}

		flush_tlb_all();

		/* Preserve the sub-page offset of the original address.  */
		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
		return vaddr;
	}

	/*
	 * Not found - assume legacy ioremap.
	 */
	return addr + TITAN_MEM_BIAS;

}
591 
/*
 * Undo titan_ioremap.  KSEG-biased returns (direct-map / legacy)
 * need no teardown; vmalloc-space mappings are vfreed.
 */
void
titan_iounmap(unsigned long addr)
{
	/* (long)addr >> 41 == -2 matches sign-extended KSEG addresses
	   (top bits 1...10); nothing was allocated for those. */
	if (((long)addr >> 41) == -2)
		return;	/* kseg map, nothing to do */
	if (addr)
		vfree((void *)(PAGE_MASK & addr));
}
600 
601 
602 /*
603  * AGP GART Support.
604  */
605 #include <linux/agp_backend.h>
606 #include <asm/agp_backend.h>
607 #include <linux/slab.h>
608 #include <linux/delay.h>
609 
/* Per-device AGP aperture state, hung off agp->aperture.sysdata. */
struct titan_agp_aperture {
	struct pci_iommu_arena *arena;	/* SG arena backing the aperture */
	long pg_start;			/* first reserved page in the arena */
	long pg_count;			/* number of reserved pages */
};
615 
616 static int
titan_agp_setup(alpha_agp_info * agp)617 titan_agp_setup(alpha_agp_info *agp)
618 {
619 	struct titan_agp_aperture *aper;
620 
621 	if (!alpha_agpgart_size)
622 		return -ENOMEM;
623 
624 	aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
625 	if (aper == NULL)
626 		return -ENOMEM;
627 
628 	aper->arena = agp->hose->sg_pci;
629 	aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
630 	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
631 				       aper->pg_count - 1);
632 	if (aper->pg_start < 0) {
633 		printk(KERN_ERR "Failed to reserve AGP memory\n");
634 		kfree(aper);
635 		return -ENOMEM;
636 	}
637 
638 	agp->aperture.bus_base =
639 		aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
640 	agp->aperture.size = aper->pg_count * PAGE_SIZE;
641 	agp->aperture.sysdata = aper;
642 
643 	return 0;
644 }
645 
646 static void
titan_agp_cleanup(alpha_agp_info * agp)647 titan_agp_cleanup(alpha_agp_info *agp)
648 {
649 	struct titan_agp_aperture *aper = agp->aperture.sysdata;
650 	int status;
651 
652 	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
653 	if (status == -EBUSY) {
654 		printk(KERN_WARNING
655 		       "Attempted to release bound AGP memory - unbinding\n");
656 		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
657 		status = iommu_release(aper->arena, aper->pg_start,
658 				       aper->pg_count);
659 	}
660 	if (status < 0)
661 		printk(KERN_ERR "Failed to release AGP memory\n");
662 
663 	kfree(aper);
664 	kfree(agp);
665 }
666 
/*
 * Program the AGP fields of the port control register from the
 * negotiated AGP mode: side-band addressing, data rate, request queue
 * depths, and the master AGP enable.  Always returns 0.
 */
static int
titan_agp_configure(alpha_agp_info *agp)
{
	union TPAchipPCTL pctl;
	titan_pachip_port *port = agp->private;
	pctl.pctl_q_whole = port->pctl.csr;

	/* Side-Band Addressing? */
	pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

	/* AGP Rate? */
	pctl.pctl_r_bits.apctl_v_agp_rate = 0;		/* 1x */
	if (agp->mode.bits.rate & 2)
		pctl.pctl_r_bits.apctl_v_agp_rate = 1;	/* 2x */
#if 0
	/* 4x is disabled -- NOTE(review): presumably unsupported or
	   unstable on this hardware; confirm before enabling. */
	if (agp->mode.bits.rate & 4)
		pctl.pctl_r_bits.apctl_v_agp_rate = 2;	/* 4x */
#endif

	/* RQ Depth? */
	pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
	pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

	/*
	 * AGP Enable.
	 */
	pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

	/* Tell the user.  */
	printk("Enabling AGP: %dX%s\n",
	       1 << pctl.pctl_r_bits.apctl_v_agp_rate,
	       pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

	/* Write it.  */
	port->pctl.csr = pctl.pctl_q_whole;

	/* And wait at least 5000 66MHz cycles (per Titan spec).  */
	udelay(100);

	return 0;
}
708 
709 static int
titan_agp_bind_memory(alpha_agp_info * agp,off_t pg_start,agp_memory * mem)710 titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, agp_memory *mem)
711 {
712 	struct titan_agp_aperture *aper = agp->aperture.sysdata;
713 	return iommu_bind(aper->arena, aper->pg_start + pg_start,
714 			  mem->page_count, mem->memory);
715 }
716 
717 static int
titan_agp_unbind_memory(alpha_agp_info * agp,off_t pg_start,agp_memory * mem)718 titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, agp_memory *mem)
719 {
720 	struct titan_agp_aperture *aper = agp->aperture.sysdata;
721 	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
722 			    mem->page_count);
723 }
724 
725 static unsigned long
titan_agp_translate(alpha_agp_info * agp,dma_addr_t addr)726 titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
727 {
728 	struct titan_agp_aperture *aper = agp->aperture.sysdata;
729 	unsigned long baddr = addr - aper->arena->dma_base;
730 	unsigned long pte;
731 
732 	if (addr < agp->aperture.bus_base ||
733 	    addr >= agp->aperture.bus_base + agp->aperture.size) {
734 		printk("%s: addr out of range\n", __FUNCTION__);
735 		return -EINVAL;
736 	}
737 
738 	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
739 	if (!(pte & 1)) {
740 		printk("%s: pte not valid\n", __FUNCTION__);
741 		return -EINVAL;
742 	}
743 
744 	return (pte >> 1) << PAGE_SHIFT;
745 }
746 
747 struct alpha_agp_ops titan_agp_ops =
748 {
749 	setup:		titan_agp_setup,
750 	cleanup:	titan_agp_cleanup,
751 	configure:	titan_agp_configure,
752 	bind:		titan_agp_bind_memory,
753 	unbind:		titan_agp_unbind_memory,
754 	translate:	titan_agp_translate
755 };
756 
757 alpha_agp_info *
titan_agp_info(void)758 titan_agp_info(void)
759 {
760 	alpha_agp_info *agp;
761 	struct pci_controller *hose;
762 	titan_pachip_port *port;
763 	int hosenum = -1;
764 	union TPAchipPCTL pctl;
765 
766 	/*
767 	 * Find the AGP port.
768 	 */
769 	port = &TITAN_pachip0->a_port;
770 	if (titan_query_agp(port))
771 		hosenum = 2;
772 	if (hosenum < 0 &&
773 	    titan_query_agp(port = &TITAN_pachip1->a_port))
774 		hosenum = 3;
775 
776 	/*
777 	 * Find the hose the port is on.
778 	 */
779 	for (hose = hose_head; hose; hose = hose->next)
780 		if (hose->index == hosenum)
781 			break;
782 
783 	if (!hose || !hose->sg_pci)
784 		return NULL;
785 
786 	/*
787 	 * Allocate the info structure.
788 	 */
789 	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
790 
791 	/*
792 	 * Fill it in.
793 	 */
794 	agp->type = 0 /* FIXME: ALPHA_CORE_AGP */;
795 	agp->hose = hose;
796 	agp->private = port;
797 	agp->ops = &titan_agp_ops;
798 
799 	/*
800 	 * Aperture - not configured until ops.setup().
801 	 *
802 	 * FIXME - should we go ahead and allocate it here?
803 	 */
804 	agp->aperture.bus_base = 0;
805 	agp->aperture.size = 0;
806 	agp->aperture.sysdata = NULL;
807 
808 	/*
809 	 * Capabilities.
810 	 */
811 	agp->capability.lw = 0;
812 	agp->capability.bits.rate = 3; 	/* 2x, 1x */
813 	agp->capability.bits.sba = 1;
814 	agp->capability.bits.rq = 7;	/* 8 - 1 */
815 
816 	/*
817 	 * Mode.
818 	 */
819 	pctl.pctl_q_whole = port->pctl.csr;
820 	agp->mode.lw = 0;
821 	agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
822 	agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
823 	agp->mode.bits.rq = 7;	/* RQ Depth? */
824 	agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;
825 
826 	return agp;
827 }
828