/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

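/*
 * The three helpers below mirror the i386 page-table hierarchy:
 * remap_area_pages() walks the pgd, remap_area_pmd() fills one pgd
 * entry's worth of pmds, and remap_area_pte() fills one pmd entry's
 * worth of ptes.  Each level clamps its work to the region covered by
 * a single entry of the level above and loops until the whole
 * [address, address + size) range is mapped.
 */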
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/*
	 * Reduce address to its offset within one pmd entry and clamp
	 * the end of the range to that entry's reach.
	 */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
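/*
 * Worked example of the clamping above (illustrative numbers only,
 * assuming the 4MB PMD_SIZE of non-PAE i386): for address 0xd0401000
 * and size 0x3000, "address &= ~PMD_MASK" leaves the in-pmd offset
 * 0x1000, end becomes 0x4000, and the loop installs three ptes before
 * the caller advances to the next pmd entry.
 */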

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/*
	 * Bias phys_addr by -address so that "address + phys_addr"
	 * yields the right physical address however far address has
	 * advanced.
	 */
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;	/* same bias trick as in remap_area_pmd() */
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}
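/*
 * Typical use (illustrative sketch only; MY_DEV_PHYS and MY_DEV_LEN
 * are made-up placeholders for a device's MMIO base and length):
 *
 *	void *regs = ioremap(MY_DEV_PHYS, MY_DEV_LEN);
 *	if (regs) {
 *		unsigned int id = readl(regs);
 *		...
 *		iounmap(regs);
 *	}
 *
 * ioremap() is the wrapper in <asm/io.h> that calls __ioremap() with
 * flags == 0; readl() performs a 32-bit MMIO read.
 */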

void iounmap(void *addr)
{
	/*
	 * Addresses below high_memory came from the phys_to_virt()
	 * shortcut in __ioremap() and were never really mapped, so
	 * there is nothing to free for them.
	 */
	if (addr > high_memory)
		return vfree((void *) (PAGE_MASK & (unsigned long) addr));
}

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		/*
		 * Fixmap virtual addresses grow downwards as the index
		 * grows, so stepping the index down maps successive
		 * physical pages at successive virtual addresses.
		 */
		--idx;
		--nrpages;
	}
	return (void *) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
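/*
 * Boot-time usage sketch (illustrative only; TABLE_PHYS and TABLE_LEN
 * are made-up placeholders for a firmware table's location and size):
 *
 *	void *p = bt_ioremap(TABLE_PHYS, TABLE_LEN);
 *	if (p) {
 *		... parse the table through p ...
 *		bt_iounmap(p, TABLE_LEN);
 *	}
 *
 * Unlike ioremap(), this works early in boot before the vmalloc area
 * is usable, since it only rewrites a few reserved fixmap slots.
 */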

void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	/* fix_to_virt(FIX_BTMAP_BEGIN) is the lowest btmap address */
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		/* Clear the slot: no physical page, empty protection bits. */
		__set_fixmap(idx, 0, __pgprot(0));
		--idx;
		--nrpages;
	}
}