// SPDX-License-Identifier: GPL-2.0
/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>

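/*
 * How this works: register_trapped_io() maps the page that holds a
 * struct trapped_io into a vmalloc window with PAGE_NONE protection,
 * so every access through that window faults.  The arch fault code
 * then calls handle_trapped_io(), which recovers the descriptor from
 * the page tables, translates the faulting address back to the real
 * resource and emulates the access with the raw MMIO accessors.
 */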
#define TRAPPED_PAGES_MAX 16

#ifdef CONFIG_HAS_IOPORT_MAP
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);

static int trapped_io_disable __read_mostly;

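/* "noiotrap" on the kernel command line disables all trapped io handling */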
static int __init trapped_io_setup(char *__unused)
{
	trapped_io_disable = 1;
	return 1;
}
__setup("noiotrap", trapped_io_setup);

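/*
 * register_trapped_io - install a trapping window for a device
 * @tiop: page aligned descriptor listing the resources to override
 *
 * Maps the page holding @tiop once for every page of the (page rounded)
 * resource range, with PAGE_NONE protection, so each access through
 * tiop->virt_base faults and can be emulated.  All resources must be of
 * a single type (IORESOURCE_IO or IORESOURCE_MEM, not a mix).
 * Returns 0 on success (or when trapping is disabled), -1 on error.
 */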
int register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	if (unlikely(trapped_io_disable))
		return 0;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup(resource_size(res), PAGE_SIZE);
		flags |= res->flags;
	}

	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;

	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
		       (unsigned long)(tiop->virt_base + len),
		       res->flags & IORESOURCE_IO ? "io" : "mmio",
		       (unsigned long)res->start);
		len += roundup(resource_size(res), PAGE_SIZE);
	}

	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
#ifdef CONFIG_HAS_IOPORT_MAP
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
#endif
	spin_unlock_irq(&trapped_lock);

	return 0;
 bad:
	pr_warn("unable to install trapped io filter\n");
	return -1;
}

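/*
 * match_trapped_io_handler - look up a trapping virtual address
 * @list: &trapped_io or &trapped_mem
 * @offset: physical/port address a caller asked to map
 * @size: length of the requested mapping (not used for matching)
 *
 * Returns the address inside a registered trapping window whose
 * resource starts at @offset, or NULL if no window covers it.
 */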
void __iomem *match_trapped_io_handler(struct list_head *list,
				       unsigned long offset,
				       unsigned long size)
{
	unsigned long voffs;
	struct trapped_io *tiop;
	struct resource *res;
	int k, len;
	unsigned long flags;

	spin_lock_irqsave(&trapped_lock, flags);
	list_for_each_entry(tiop, list, list) {
		voffs = 0;
		for (k = 0; k < tiop->num_resources; k++) {
			res = tiop->resource + k;
			if (res->start == offset) {
				spin_unlock_irqrestore(&trapped_lock, flags);
				return tiop->virt_base + voffs;
			}

			len = resource_size(res);
			voffs += roundup(len, PAGE_SIZE);
		}
	}
	spin_unlock_irqrestore(&trapped_lock, flags);
	return NULL;
}

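/*
 * Walk the kernel page tables for a trapping window address.  Since
 * register_trapped_io() backs every page of the window with the page
 * that holds the descriptor itself, the pfn of the pte found here
 * converts straight back into the struct trapped_io pointer.
 */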
static struct trapped_io *lookup_tiop(unsigned long address)
{
	pgd_t *pgd_k;
	p4d_t *p4d_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	pte_t entry;

	pgd_k = swapper_pg_dir + pgd_index(address);
	if (!pgd_present(*pgd_k))
		return NULL;

	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	pte_k = pte_offset_kernel(pmd_k, address);
	entry = *pte_k;

	return pfn_to_kaddr(pte_pfn(entry));
}

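/*
 * Translate an address inside tiop->virt_base back into the physical
 * resource address it overrides, walking the (page rounded) resources
 * in registration order.  Returns 0 if the address falls outside the
 * window.
 */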
static unsigned long lookup_address(struct trapped_io *tiop,
				    unsigned long address)
{
	struct resource *res;
	unsigned long vaddr = (unsigned long)tiop->virt_base;
	unsigned long len;
	int k;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len = roundup(resource_size(res), PAGE_SIZE);
		if (address < (vaddr + len))
			return res->start + (address - vaddr);
		vaddr += len;
	}
	return 0;
}

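/*
 * Perform one emulated transfer: read @src_len bytes from @src_addr and
 * write @dst_len bytes to @dst_addr using the matching-width raw
 * accessors.  The value read is also returned for tracing.
 */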
static unsigned long long copy_word(unsigned long src_addr, int src_len,
				    unsigned long dst_addr, int dst_len)
{
	unsigned long long tmp = 0;

	switch (src_len) {
	case 1:
		tmp = __raw_readb(src_addr);
		break;
	case 2:
		tmp = __raw_readw(src_addr);
		break;
	case 4:
		tmp = __raw_readl(src_addr);
		break;
	case 8:
		tmp = __raw_readq(src_addr);
		break;
	}

	switch (dst_len) {
	case 1:
		__raw_writeb(tmp, dst_addr);
		break;
	case 2:
		__raw_writew(tmp, dst_addr);
		break;
	case 4:
		__raw_writel(tmp, dst_addr);
		break;
	case 8:
		__raw_writeq(tmp, dst_addr);
		break;
	}

	return tmp;
}

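/*
 * Emulate a load from a trapped address: find the descriptor for the
 * faulting window, translate to the real resource address and read it
 * with at least tiop->minimum_bus_width bits, copying @cnt bytes of the
 * result into @dst.  Returns 0 on success, @cnt if the address could
 * not be translated.
 */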
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long src_addr = (unsigned long)src;
	unsigned long long tmp;

	pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
	tiop = lookup_tiop(src_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	src_addr = lookup_address(tiop, src_addr);
	if (!src_addr)
		return cnt;

	tmp = copy_word(src_addr,
			max_t(unsigned long, cnt,
			      (tiop->minimum_bus_width / 8)),
			(unsigned long)dst, cnt);

	pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
	return 0;
}

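/* Mirror image of from_device() for emulated stores to a trapped address. */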
static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long dst_addr = (unsigned long)dst;
	unsigned long long tmp;

	pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
	tiop = lookup_tiop(dst_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	dst_addr = lookup_address(tiop, dst_addr);
	if (!dst_addr)
		return cnt;

	tmp = copy_word((unsigned long)src, cnt,
			dst_addr, max_t(unsigned long, cnt,
					(tiop->minimum_bus_width / 8)));

	pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
	return 0;
}

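/* mem_access methods handed to the unaligned access emulation code */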
static struct mem_access trapped_io_access = {
	from_device,
	to_device,
};

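/*
 * handle_trapped_io - fault-time entry point for trapped io emulation
 * @regs: register state at the fault
 * @address: faulting address
 *
 * Called from the arch fault handling code.  If @address belongs to a
 * registered trapping window, fetch the faulting instruction and let
 * handle_unaligned_access() replay it through trapped_io_access.
 * Returns nonzero when the access was emulated, 0 otherwise.
 */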
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
	insn_size_t instruction;
	int tmp;

	if (trapped_io_disable)
		return 0;
	if (!lookup_tiop(address))
		return 0;

	WARN_ON(user_mode(regs));

	if (copy_from_kernel_nofault(&instruction, (void *)(regs->pc),
				     sizeof(instruction))) {
		return 0;
	}

	tmp = handle_unaligned_access(instruction, regs,
				      &trapped_io_access, 1, address);
	return tmp == 0;
}