1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * arch/sh64/mm/ioremap.c
7  *
8  * Copyright (C) 2000, 2001  Paolo Alberelli
9  * Copyright (C) 2003  Paul Mundt
10  *
11  * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
12  * derived from arch/i386/mm/ioremap.c .
13  *
14  *   (C) Copyright 1995 1996 Linus Torvalds
15  */
16 
17 #include <linux/vmalloc.h>
18 #include <asm/io.h>
19 #include <asm/pgalloc.h>
20 #include <linux/ioport.h>
21 #include <linux/bootmem.h>
22 #include <linux/proc_fs.h>
23 
24 /*
25 ** change to:
26 ** #define DEBUG_IOREMAP(args)	printk(args)
27 ** to turn on ioremap trace.
28 */
29 #define DEBUG_IOREMAP(args)
30 
/*
 * Fill one pte table with mappings for [address, address + size), which
 * must lie within a single pmd entry's span.  Each page is mapped to the
 * corresponding page of phys_addr with read/write device attributes plus
 * the caller-supplied flag bits.
 */
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Reduce address to its offset within the pmd span, and clamp the
	 * end so we never walk past this pte table. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	DEBUG_IOREMAP(("    %s: pte %x address %x size %x phys_addr %x\n", \
			__FUNCTION__,pte,address,size,phys_addr));

	do {
		/* An existing entry indicates a double mapping; warn but
		 * overwrite it anyway (matches i386 ioremap behaviour). */
		if (!pte_none(*pte))
			printk("remap_area_pte: page already exists\n");
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT |
					_PAGE_READ | _PAGE_WRITE |
					_PAGE_DIRTY | _PAGE_ACCESSED |_PAGE_SHARED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
55 
/*
 * Populate the pmd entries covering [address, address + size) — clamped to
 * one pgd entry's span — allocating pte tables as needed and delegating the
 * per-page work to remap_area_pte().
 *
 * Returns 0 on success or -ENOMEM if a pte table cannot be allocated.
 *
 * Fixes vs. original: the first DEBUG_IOREMAP invocation omitted the
 * __FUNCTION__ argument, and both debug format strings carried six
 * conversions for five (or four) arguments; the pte declaration also
 * followed a statement, which gnu89 rejects.
 */
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Reduce address to its offset within the pgd span and clamp end. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	/* Bias phys_addr so that (address + phys_addr) always yields the
	 * physical address to map, even as address advances below. */
	phys_addr -= address;

	do {
		pte_t * pte = pte_alloc(&init_mm, pmd, address);

		DEBUG_IOREMAP(("%s: pmd 0x%08x, addr. 0x%08x, phys. 0x%08x, end 0x%08x\n", \
			       __FUNCTION__, pmd, address, phys_addr, end));

		if (!pte) {
			return -ENOMEM;
		}
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);

		/* Advance to the start of the next pmd entry. */
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
91 
remap_area_pages(unsigned long address,unsigned long phys_addr,unsigned long size,unsigned long flags)92 static int remap_area_pages(unsigned long address, unsigned long phys_addr,
93 				 unsigned long size, unsigned long flags)
94 {
95 	int error;
96 	pgd_t * dir;
97 	unsigned long end = address + size;
98 
99 	phys_addr -= address;
100 	dir = pgd_offset(&init_mm, address);
101 	flush_cache_all();
102 	spin_lock(&init_mm.page_table_lock);
103 	do {
104 
105 		pmd_t *pmd = pmd_alloc(&init_mm,dir, address);
106 		error = -ENOMEM;
107 		if (!pmd)
108 			break;
109 		if (remap_area_pmd(pmd, address, end - address,
110 				   phys_addr + address, flags)) {
111 			 break;
112 		}
113 		error=0;
114 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
115 		dir++;
116 	} while (address && (address < end));
117 	spin_unlock(&init_mm.page_table_lock);
118 	flush_tlb_all();
119 	return 0;
120 }
121 
122 /*
123  * Generic mapping function (not visible outside):
124  */
125 
126 /*
127  * Remap an arbitrary physical address space into the kernel virtual
128  * address space. Needed when the kernel wants to access high addresses
129  * directly.
130  *
131  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
132  * have to convert them into an offset in a page-aligned mapping, but the
133  * caller shouldn't need to know that small detail.
134  */
__ioremap(unsigned long phys_addr,unsigned long size,unsigned long flags)135 void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
136 {
137 	void * addr;
138 	struct vm_struct * area;
139 	unsigned long offset, last_addr;
140 
141 	/* Don't allow wraparound or zero size */
142 	last_addr = phys_addr + size - 1;
143 	if (!size || last_addr < phys_addr)
144 		return NULL;
145 
146 	/*
147 	 * Mappings have to be page-aligned
148 	 */
149 	offset = phys_addr & ~PAGE_MASK;
150 	phys_addr &= PAGE_MASK;
151 	size = PAGE_ALIGN(last_addr) - phys_addr;
152 
153 	/*
154 	 * Ok, go for it..
155 	 */
156 	area = get_vm_area(size, VM_IOREMAP);
157 	DEBUG_IOREMAP(("Get vm_area returns 0x%08x addr 0x%08x \n", \
158 		      area, area->addr));
159 
160 	if (!area)
161 		return NULL;
162 	addr = area->addr;
163 	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
164 		vfree(addr);
165 		return NULL;
166 	}
167 	return (void *) (offset + (char *)addr);
168 }
169 
iounmap(void * addr)170 void iounmap(void *addr)
171 {
172 	if (((long) addr >= VMALLOC_START) && ((long) addr <= VMALLOC_END))
173 		return vfree((void *) (PAGE_MASK & (unsigned long) addr));
174 }
175 
/* Root resource describing the fixed virtual window used for early
 * (pre-kmalloc) on-chip I/O mappings; children are carved out of it by
 * shmedia_ioremap() via allocate_resource(). */
static struct resource shmedia_iomap = {
        .name	= "shmedia_iomap",
	.start	= IOBASE_VADDR,
	.end	= IOBASE_END - 1,
};

/* Forward declarations for the mini-allocator below. */
static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
static void shmedia_unmapioaddr(unsigned long vaddr);
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);
185 
186 /*
187  * We have the same problem as the SPARC, so lets have the same comment:
188  * Our mini-allocator...
189  * Boy this is gross! We need it because we must map I/O for
190  * timers and interrupt controller before the kmalloc is available.
191  */
192 
#define XNMLN  15		/* max stored resource-name length */
#define XNRES  10		/* number of static descriptor slots */

/* Statically allocated resource descriptor with embedded name storage,
 * for mappings created before kmalloc is available. */
struct xresource {
	struct resource xres;   /* Must be first */
	int xflag;              /* 1 == used */
	char xname[XNMLN+1];
};

/* The static pool itself; managed by xres_alloc()/xres_free(). */
static struct xresource xresv[XNRES];
203 
xres_alloc(void)204 static struct xresource *xres_alloc(void)
205 {
206         struct xresource *xrp;
207         int n;
208 
209         xrp = xresv;
210         for (n = 0; n < XNRES; n++) {
211                 if (xrp->xflag == 0) {
212                         xrp->xflag = 1;
213                         return xrp;
214                 }
215                 xrp++;
216         }
217         return NULL;
218 }
219 
xres_free(struct xresource * xrp)220 static void xres_free(struct xresource *xrp)
221 {
222         xrp->xflag = 0;
223 }
224 
shmedia_find_resource(struct resource * root,unsigned long vaddr)225 static struct resource *shmedia_find_resource(struct resource *root,
226 					      unsigned long vaddr)
227 {
228 	struct resource *res;
229 
230 	for (res = root->child; res; res = res->sibling)
231 		if (res->start <= vaddr && res->end >= vaddr)
232 			return res;
233 
234 	return NULL;
235 }
236 
shmedia_alloc_io(unsigned long phys,unsigned long size,const char * name)237 static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
238 				      const char *name)
239 {
240         static int printed_full = 0;
241         struct xresource *xres;
242         struct resource *res;
243         char *tack;
244         int tlen;
245 
246         if (name == NULL) name = "???";
247 
248         if ((xres = xres_alloc()) != 0) {
249                 tack = xres->xname;
250                 res = &xres->xres;
251         } else {
252                 if (!printed_full) {
253                         printk("%s: done with statics, switching to kmalloc\n",
254 			       __FUNCTION__);
255                         printed_full = 1;
256                 }
257                 tlen = strlen(name);
258                 tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
259                 if (!tack)
260 			return -ENOMEM;
261                 memset(tack, 0, sizeof(struct resource));
262                 res = (struct resource *) tack;
263                 tack += sizeof (struct resource);
264         }
265 
266         strncpy(tack, name, XNMLN);
267         tack[XNMLN] = 0;
268         res->name = tack;
269 
270         return shmedia_ioremap(res, phys, size);
271 }
272 
/*
 * Carve a page-aligned window for [pa, pa + sz) out of the shmedia_iomap
 * resource, install a PTE for every page of the rounded range, then trim
 * @res back to the caller's exact byte range.
 *
 * Returns the virtual address corresponding to @pa (including its
 * sub-page offset).  Panics if the resource range cannot be reserved —
 * this runs at early boot where failure is unrecoverable.
 *
 * Fix vs. original: the printk used "%2ld" for psz, which is an
 * unsigned int; the specifier now matches the argument type.
 */
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
{
        unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
        unsigned long va;
        unsigned int psz;

        if (allocate_resource(&shmedia_iomap, res, round_sz,
			      shmedia_iomap.start, shmedia_iomap.end,
			      PAGE_SIZE, NULL, NULL) != 0) {
                panic("alloc_io_res(%s): cannot occupy\n",
                    (res->name != NULL)? res->name: "???");
        }

        va = res->start;
        pa &= PAGE_MASK;

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	/* log at boot time ... */
	printk("mapioaddr: %6s  [%2u page%s]  va 0x%08lx   pa 0x%08x\n",
	       ((res->name != NULL) ? res->name : "???"),
	       psz, psz == 1 ? " " : "s", va, pa);

	/* Map every page of the rounded range. */
        for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
                shmedia_mapioaddr(pa, va);
                va += PAGE_SIZE;
                pa += PAGE_SIZE;
        }

        res->start += offset;
        res->end = res->start + sz - 1;         /* not strictly necessary.. */

        return res->start;
}
308 
shmedia_free_io(struct resource * res)309 static void shmedia_free_io(struct resource *res)
310 {
311 	unsigned long len = res->end - res->start + 1;
312 
313 	BUG_ON((len & (PAGE_SIZE - 1)) != 0);
314 
315 	while (len) {
316 		len -= PAGE_SIZE;
317 		shmedia_unmapioaddr(res->start + len);
318 	}
319 
320 	release_resource(res);
321 }
322 
shmedia_mapioaddr(unsigned long pa,unsigned long va)323 static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
324 {
325 	pgd_t *pgdp;
326 	pmd_t *pmdp;
327 	pte_t *ptep;
328 
329 	unsigned long flags = 1; /* 1 = CB0-1 device */
330 
331 
332 	DEBUG_IOREMAP(("shmedia_mapiopage pa %08x va %08x\n",  pa, va));
333 
334 	pgdp = pgd_offset_k(va);
335 	if (pgd_none(*pgdp)) {
336 		pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
337 		if (pmdp == NULL) panic("No memory for pmd\n");
338 		memset(pmdp, 0, PTRS_PER_PGD * sizeof(pmd_t));
339 		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
340 	}
341 
342 	pmdp = pmd_offset(pgdp, va);
343 	if (pmd_none(*pmdp)) {
344 		ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
345 		if (ptep == NULL) panic("No memory for pte\n");
346 		clear_page((void *)ptep);
347 		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
348 	}
349 
350 	ptep = pte_offset(pmdp, va);
351 	set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT |
352 			_PAGE_READ | _PAGE_WRITE |
353 			_PAGE_DIRTY | _PAGE_ACCESSED |_PAGE_SHARED | flags)));
354 }
355 
shmedia_unmapioaddr(unsigned long vaddr)356 static void shmedia_unmapioaddr(unsigned long vaddr)
357 {
358 	pgd_t *pgdp;
359 	pmd_t *pmdp;
360 	pte_t *ptep;
361 
362 	pgdp = pgd_offset_k(vaddr);
363 	pmdp = pmd_offset(pgdp, vaddr);
364 
365 	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
366 		return;
367 
368 	ptep = pte_offset(pmdp, vaddr);
369 
370 	if (pte_none(*ptep) || !pte_present(*ptep))
371 		return;
372 
373 	clear_page((void *)ptep);
374 	pte_clear(ptep);
375 }
376 
onchip_remap(unsigned long phys,unsigned long size,const char * name)377 unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
378 {
379 	if (size < PAGE_SIZE)
380 		size = PAGE_SIZE;
381 
382 	return shmedia_alloc_io(phys, size, name);
383 }
384 
onchip_unmap(unsigned long vaddr)385 void onchip_unmap(unsigned long vaddr)
386 {
387 	struct resource *res;
388 	unsigned int psz;
389 
390 	res = shmedia_find_resource(&shmedia_iomap, vaddr);
391 	if (!res) {
392 		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
393 		       __FUNCTION__, vaddr);
394 		return;
395 	}
396 
397         psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
398 
399         printk(KERN_DEBUG "unmapioaddr: %6s  [%2ld page%s] freed\n",
400 	       res->name, psz, psz == 1 ? " " : "s");
401 
402 	shmedia_free_io(res);
403 
404 	if ((char *)res >= (char *)xresv &&
405 	    (char *)res <  (char *)&xresv[XNRES]) {
406 		xres_free((struct xresource *)res);
407 	} else {
408 		kfree(res);
409 	}
410 }
411 
412 #ifdef CONFIG_PROC_FS
413 static int
ioremap_proc_info(char * buf,char ** start,off_t fpos,int length,int * eof,void * data)414 ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
415 		  void *data)
416 {
417 	char *p = buf, *e = buf + length;
418 	struct resource *r;
419 	const char *nm;
420 
421 	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
422 		if (p + 32 >= e)        /* Better than nothing */
423 			break;
424 		if ((nm = r->name) == 0) nm = "???";
425 		p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
426 	}
427 
428 	return p-buf;
429 }
430 #endif /* CONFIG_PROC_FS */
431 
register_proc_onchip(void)432 static int __init register_proc_onchip(void)
433 {
434 #ifdef CONFIG_PROC_FS
435 	create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
436 #endif
437 	return 0;
438 }
439 
440 __initcall(register_proc_onchip);
441