// SPDX-License-Identifier: GPL-2.0
/*
 * io-unit.c: IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>

#include "mm_32.h"

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

static const struct dma_map_ops iounit_dma_ops;

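/*
 * Per-SBI setup: allocate the iounit_struct bookkeeping, ioremap the
 * 16-page External Page Table (XPT) from resource[2], clear every IOPTE,
 * and hook the device up to iounit_dma_ops.  The limit[]/rotor[] values
 * carve the XPT bitmap into the three allocation pools used by
 * iounit_get_area() below.
 */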
static void __init iounit_iommu_init(struct platform_device *op)
{
	struct iounit_struct *iounit;
	iopte_t __iomem *xpt;
	iopte_t __iomem *xptend;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) {
		prom_printf("SUN4D: Cannot map External Page Table.");
		prom_halt();
	}

	op->dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	for (; xpt < xptend; xpt++)
		sbus_writel(0, xpt);

	op->dev.dma_ops = &iounit_dma_ops;
}

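/*
 * Probe time: every "sbi" node in the device tree gets its own IO-UNIT
 * instance; of_propagate_archdata() then copies the archdata (and thus
 * the dma_ops) down to the SBI's children so SBus drivers see the same
 * IOMMU.
 */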
static int __init iounit_init(void)
{
	extern void sun4d_init_sbi_irq(void);
	struct device_node *dp;

	for_each_node_by_name(dp, "sbi") {
		struct platform_device *op = of_find_device_by_node(dp);

		iounit_iommu_init(op);
		of_propagate_archdata(op);
	}

	sun4d_init_sbi_irq();

	return 0;
}

subsys_initcall(iounit_init);

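/*
 * Allocate npages consecutive slots in the XPT bitmap and point them at
 * the physical pages backing vaddr, returning the DVMA address to hand to
 * the device.  The switch below packs a pool search order into the nibbles
 * of 'i': e.g. 0x0231 means "try pool 1, then pool 3, then pool 2", where
 * pool j covers bitmap indices limit[j-1]..limit[j).  Each pool is scanned
 * next-fit style from its rotor, wrapping around once before moving on to
 * the next nibble; running out of pools is fatal.  Successive pages bump
 * the IOPTE by 0x100 (one page after the >>4 in MKIOPTE()), so the mapped
 * region must be physically contiguous.
 */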
/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredients :) */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}

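/*
 * Streaming mapping of a single page (or sub-page buffer).  Anything
 * larger than 256K is refused outright - see the maxphys XXX below.
 * The returned dma_addr_t lives in the IOUNIT_DMA_BASE window.
 */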
static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, enum dma_data_direction dir,
		unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long ret, flags;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

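/*
 * Scatterlist version: each entry is mapped independently through
 * iounit_get_area() under a single grab of the lock; entries are not
 * merged and nents is returned unchanged.
 */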
static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	struct scatterlist *sg;
	unsigned long flags;
	int i;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
		sg->dma_length = sg->length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
	return nents;
}

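/*
 * Tear-down only clears the bitmap bits covering the mapping; the stale
 * IOPTEs are left in the XPT and simply get overwritten the next time
 * those slots are allocated.
 */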
static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

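/* Same as iounit_unmap_page(), once per scatterlist entry. */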
static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags, vaddr, len;
	struct scatterlist *sg;
	int i;

	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

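/*
 * Coherent ("consistent") allocations for SBus devices: the freshly
 * allocated pages are remapped into the sparc_dma_alloc_resource()
 * virtual window with a cacheable SRMMU PTE, and the matching IOPTEs are
 * written directly into the XPT, bypassing the bitmap allocator used for
 * streaming mappings.
 */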
#ifdef CONFIG_SBUS
static void *iounit_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	pgprot_t dvma_prot;
	iopte_t __iomem *iopte;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (!va)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;
	*dma_handle = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while (addr < end) {
		page = va;
		{
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pmdp = pmd_off_k(addr);
			ptep = pte_offset_kernel(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = iounit->page_table + i;
			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	/* XXX Somebody please fill this in */
}
#endif

static const struct dma_map_ops iounit_dma_ops = {
#ifdef CONFIG_SBUS
	.alloc		= iounit_alloc,
	.free		= iounit_free,
#endif
	.map_page	= iounit_map_page,
	.unmap_page	= iounit_unmap_page,
	.map_sg		= iounit_map_sg,
	.unmap_sg	= iounit_unmap_sg,
};