/* $Id: io-unit.c,v 1.23 2001/02/13 01:16:43 davem Exp $
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/bitops.h>

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
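
/*
 * Note on the IOPTE encoding (as implied by MKIOPTE above, not a full
 * hardware description): the physical address is stored shifted right by
 * four bits in the IOUPTE_PAGE field, with the cacheable, writable and
 * valid bits OR'd in.  With 4K pages, advancing by one page therefore
 * means adding 0x100 to the encoded value, which iounit_get_area() relies
 * on; iounit_translate_dvma() reverses the shift to recover the physical
 * address.
 */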

void __init
iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
{
	iopte_t *xpt, *xptend;
	struct iounit_struct *iounit;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;

	iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit)
		panic("iounit_init: Cannot allocate iounit_struct.");

	memset(iounit, 0, sizeof(*iounit));
	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
			 sizeof(iommu_promregs));
	prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
	memset(&r, 0, sizeof(r));
	r.flags = iommu_promregs[2].which_io;
	r.start = iommu_promregs[2].phys_addr;
	xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) panic("Cannot map External Page Table.");

	sbus->iommu = (struct iommu_struct *)iounit;
	iounit->page_table = xpt;

	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
	     	*xpt++ = 0;
}

/* One has to hold iounit->lock to call this */
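/*
 * Allocation strategy, as implemented below: the External Page Table
 * bitmap is split into three regions (see the limit[]/rotor[] setup in
 * iounit_init()); the regions are tried in a size-dependent order, and
 * within each region a next-fit rotor makes successive allocations walk
 * forward through the bitmap instead of rescanning from the start every
 * time.
 */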
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredients :) */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}
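
	/*
	 * Each nibble of i names one of the three bitmap regions (1, 2 or 3)
	 * to try, least significant nibble first; "i >>= 4" below moves on to
	 * the next candidate, and a zero nibble means every region has been
	 * tried and we give up.
	 */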

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}

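/*
 * Map a single kernel buffer for DVMA.  The value returned is the bus
 * address the SBUS device should use; iounit_release_scsi_one() undoes
 * the mapping.
 */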
static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long ret, flags;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		sz--;
		sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)sg[sz].address, sg[sz].length);
		sg[sz].dvma_length = sg[sz].length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

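/*
 * Release paths.  Note that only the allocation bitmap bits are cleared
 * here; the IOPTEs themselves are left in place and are simply
 * overwritten the next time the slots are handed out.
 */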
static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	unsigned long vaddr, len;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

#ifdef CONFIG_SBUS
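/*
 * Map the kernel pages starting at va so that SBUS devices see them at
 * DVMA address addr: a kernel PTE is installed for each page of the DVMA
 * range, and the corresponding XPT entry of every IO-UNIT in the system
 * is pointed at the same physical page.
 */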
static void iounit_map_dma_area(unsigned long va, __u32 addr, int len)
{
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;
	struct sbus_bus *sbus;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset(pmdp, addr);

			set_pte(ptep, pte_val(mk_pte(virt_to_page(page), dvma_prot)));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			for_each_sbus(sbus) {
				struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

				iopte = (iopte_t *)(iounit->page_table + i);
				*iopte = __iopte(MKIOPTE(__pa(page)));
			}
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();
}

static void iounit_unmap_dma_area(unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}

/* XXX We do not pass sbus device here, bad. */
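/*
 * Turn an IOPTE back into a physical address: mask off the low permission
 * bits and undo the right-shift by four that MKIOPTE applied.
 */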
static unsigned long iounit_translate_dvma(unsigned long addr)
{
	struct sbus_bus *sbus = sbus_root;	/* They are all the same */
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
	int i;
	iopte_t *iopte;

	i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
	iopte = (iopte_t *)(iounit->page_table + i);
	return (iopte_val(*iopte) & 0xFFFFFFF0) << 4; /* XXX sun4d guru, help */
}
#endif

static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
	return vaddr;
}

static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}

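/*
 * Boot-time hook: patch the generic mmu_* entry points to the IO-UNIT
 * implementations above via the sparc32 btfixup mechanism.
 */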
void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
#endif
}

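/*
 * Reserve a contiguous DVMA range covering at least size bytes, using the
 * same bitmap search as iounit_get_area() but without filling in any
 * IOPTEs; iounit_map_dma_page() below installs the IOPTE for one page at
 * a time.  A caller might use the pair roughly like this (hypothetical
 * sketch; dvma, buf, len and off are illustrative names and buf is
 * assumed to be page aligned):
 *
 *	dvma = iounit_map_dma_init(sbus, len);
 *	for (off = 0; off < len; off += PAGE_SIZE)
 *		iounit_map_dma_page(dvma + off, buf + off, sbus);
 */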
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	unsigned long flags;
	__u32 ret;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	i = 0x0213;
	spin_lock_irqsave(&iounit->lock, flags);
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
	for (k = 0; k < npages; k++, scan++)
		set_bit(scan, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
{
	int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
	return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
}