1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * VFIO PCI I/O Port & MMIO access
4 *
5 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
6 * Author: Alex Williamson <alex.williamson@redhat.com>
7 *
8 * Derived from original vfio:
9 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
10 * Author: Tom Lyon, pugs@cisco.com
11 */
12
13 #include <linux/fs.h>
14 #include <linux/pci.h>
15 #include <linux/uaccess.h>
16 #include <linux/io.h>
17 #include <linux/vfio.h>
18 #include <linux/vgaarb.h>
19
20 #include "vfio_pci_priv.h"
21
/*
 * Select per-size ioread/iowrite accessors by host endianness.  On
 * big-endian hosts the "be" variants are used instead of the plain
 * (little-endian) ones.  NOTE(review): presumably chosen so the byte
 * order presented to the device is consistent regardless of host
 * endianness -- confirm against the ioread/iowrite accessor semantics.
 * 8-bit accesses have no byte order, so a single definition suffices.
 */
#ifdef __LITTLE_ENDIAN
#define vfio_ioread64 ioread64
#define vfio_iowrite64 iowrite64
#define vfio_ioread32 ioread32
#define vfio_iowrite32 iowrite32
#define vfio_ioread16 ioread16
#define vfio_iowrite16 iowrite16
#else
#define vfio_ioread64 ioread64be
#define vfio_iowrite64 iowrite64be
#define vfio_ioread32 ioread32be
#define vfio_iowrite32 iowrite32be
#define vfio_ioread16 ioread16be
#define vfio_iowrite16 iowrite16be
#endif
#define vfio_ioread8 ioread8
#define vfio_iowrite8 iowrite8
39
/*
 * Generate vfio_pci_iowrite{8,16,32,64}() wrappers around the raw
 * accessors.  When @test_mem is set, the write is performed under
 * vdev->memory_lock (read side) and fails with -EIO if PCI memory
 * decode has been disabled (__vfio_pci_memory_enabled()), so a write
 * never lands on a BAR whose memory space is turned off.  Callers pass
 * test_mem = false for I/O port regions, which skips the check.
 * Returns 0 on success or -EIO if memory is disabled.
 */
#define VFIO_IOWRITE(size) \
static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev,	\
			bool test_mem, u##size val, void __iomem *io)	\
{									\
	if (test_mem) {							\
		down_read(&vdev->memory_lock);				\
		if (!__vfio_pci_memory_enabled(vdev)) {			\
			up_read(&vdev->memory_lock);			\
			return -EIO;					\
		}							\
	}								\
									\
	vfio_iowrite##size(val, io);					\
									\
	if (test_mem)							\
		up_read(&vdev->memory_lock);				\
									\
	return 0;							\
}

VFIO_IOWRITE(8)
VFIO_IOWRITE(16)
VFIO_IOWRITE(32)
/* iowrite64 is not defined on all configurations; only wrap it if present */
#ifdef iowrite64
VFIO_IOWRITE(64)
#endif
66
/*
 * Generate vfio_pci_ioread{8,16,32}() wrappers, mirroring VFIO_IOWRITE:
 * the read result is stored through @val, and when @test_mem is set the
 * access is performed under vdev->memory_lock and returns -EIO if PCI
 * memory decode is disabled.  Returns 0 on success.
 */
#define VFIO_IOREAD(size) \
static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev,	\
			bool test_mem, u##size *val, void __iomem *io)	\
{									\
	if (test_mem) {							\
		down_read(&vdev->memory_lock);				\
		if (!__vfio_pci_memory_enabled(vdev)) {			\
			up_read(&vdev->memory_lock);			\
			return -EIO;					\
		}							\
	}								\
									\
	*val = vfio_ioread##size(io);					\
									\
	if (test_mem)							\
		up_read(&vdev->memory_lock);				\
									\
	return 0;							\
}

VFIO_IOREAD(8)
VFIO_IOREAD(16)
VFIO_IOREAD(32)
90
91 /*
92 * Read or write from an __iomem region (MMIO or I/O port) with an excluded
93 * range which is inaccessible. The excluded range drops writes and fills
94 * reads with -1. This is intended for handling MSI-X vector tables and
95 * leftover space for ROM BARs.
96 */
static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
			void __iomem *io, char __user *buf,
			loff_t off, size_t count, size_t x_start,
			size_t x_end, bool iswrite)
{
	ssize_t done = 0;
	int ret;

	while (count) {
		size_t fillable, filled;

		/*
		 * fillable = bytes that may be really accessed this
		 * iteration: everything up to the start of the excluded
		 * range [x_start, x_end), or everything past its end.
		 * While off sits inside the excluded range, fillable is
		 * zero and the emulation branch at the bottom runs.
		 */
		if (off < x_start)
			fillable = min(count, (size_t)(x_start - off));
		else if (off >= x_end)
			fillable = count;
		else
			fillable = 0;

		/* Use the widest naturally-aligned access size possible. */
		if (fillable >= 4 && !(off % 4)) {
			u32 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 4))
					return -EFAULT;

				ret = vfio_pci_iowrite32(vdev, test_mem,
							 val, io + off);
				if (ret)
					return ret;
			} else {
				ret = vfio_pci_ioread32(vdev, test_mem,
							&val, io + off);
				if (ret)
					return ret;

				if (copy_to_user(buf, &val, 4))
					return -EFAULT;
			}

			filled = 4;
		} else if (fillable >= 2 && !(off % 2)) {
			u16 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 2))
					return -EFAULT;

				ret = vfio_pci_iowrite16(vdev, test_mem,
							 val, io + off);
				if (ret)
					return ret;
			} else {
				ret = vfio_pci_ioread16(vdev, test_mem,
							&val, io + off);
				if (ret)
					return ret;

				if (copy_to_user(buf, &val, 2))
					return -EFAULT;
			}

			filled = 2;
		} else if (fillable) {
			u8 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 1))
					return -EFAULT;

				ret = vfio_pci_iowrite8(vdev, test_mem,
							val, io + off);
				if (ret)
					return ret;
			} else {
				ret = vfio_pci_ioread8(vdev, test_mem,
						       &val, io + off);
				if (ret)
					return ret;

				if (copy_to_user(buf, &val, 1))
					return -EFAULT;
			}

			filled = 1;
		} else {
			/*
			 * Inside the excluded range: fill reads with -1,
			 * drop writes.  Consume up to the end of the
			 * excluded range in one step.
			 */
			filled = min(count, (size_t)(x_end - off));
			if (!iswrite) {
				u8 val = 0xFF;
				size_t i;

				for (i = 0; i < filled; i++)
					if (copy_to_user(buf + i, &val, 1))
						return -EFAULT;
			}
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
}
202
vfio_pci_setup_barmap(struct vfio_pci_core_device * vdev,int bar)203 static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
204 {
205 struct pci_dev *pdev = vdev->pdev;
206 int ret;
207 void __iomem *io;
208
209 if (vdev->barmap[bar])
210 return 0;
211
212 ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
213 if (ret)
214 return ret;
215
216 io = pci_iomap(pdev, bar, 0);
217 if (!io) {
218 pci_release_selected_regions(pdev, 1 << bar);
219 return -ENOMEM;
220 }
221
222 vdev->barmap[bar] = io;
223
224 return 0;
225 }
226
/*
 * Read/write a BAR or expansion ROM region on behalf of userspace.
 * *ppos encodes both the region index (VFIO_PCI_OFFSET_TO_INDEX) and the
 * offset within it.  The MSI-X vector table, if it lives in this BAR, is
 * excluded: reads of it return -1 and writes are dropped (see do_io_rw()).
 * Returns bytes transferred or a negative errno; advances *ppos on success.
 */
ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			size_t count, loff_t *ppos, bool iswrite)
{
	struct pci_dev *pdev = vdev->pdev;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	size_t x_start = 0, x_end = 0;
	resource_size_t end;
	void __iomem *io;
	struct resource *res = &vdev->pdev->resource[bar];
	ssize_t done;

	/*
	 * A shadow ROM has no resource start address; expose a fixed
	 * 128KB window for it.  Any other BAR without a start address
	 * is unimplemented.
	 */
	if (pci_resource_start(pdev, bar))
		end = pci_resource_len(pdev, bar);
	else if (bar == PCI_ROM_RESOURCE &&
		 pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW)
		end = 0x20000;
	else
		return -EINVAL;

	if (pos >= end)
		return -EINVAL;

	count = min(count, (size_t)(end - pos));

	if (bar == PCI_ROM_RESOURCE) {
		/*
		 * The ROM can fill less space than the BAR, so we start the
		 * excluded range at the end of the actual ROM. This makes
		 * filling large ROM BARs much faster.
		 */
		io = pci_map_rom(pdev, &x_start);
		if (!io) {
			done = -ENOMEM;
			goto out;
		}
		x_end = end;
	} else {
		int ret = vfio_pci_setup_barmap(vdev, bar);
		if (ret) {
			done = ret;
			goto out;
		}

		io = vdev->barmap[bar];
	}

	/* Exclude the MSI-X vector table from direct access */
	if (bar == vdev->msix_bar) {
		x_start = vdev->msix_offset;
		x_end = vdev->msix_offset + vdev->msix_size;
	}

	/* Only MMIO BARs need the memory-decode-enabled check */
	done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
			count, x_start, x_end, iswrite);

	if (done >= 0)
		*ppos += done;

	/* ROM mappings are transient, one per access */
	if (bar == PCI_ROM_RESOURCE)
		pci_unmap_rom(pdev, io);
out:
	return done;
}
290
291 #ifdef CONFIG_VFIO_PCI_VGA
/*
 * Read/write the legacy VGA ranges on behalf of userspace: the VGA
 * framebuffer window at 0xa0000-0xbffff and the two legacy I/O port
 * ranges 0x3b0-0x3bb and 0x3c0-0x3df.  The mapping is created and torn
 * down per access, bracketed by VGA arbiter ownership of the matching
 * legacy resource.  Returns bytes transferred or a negative errno;
 * advances *ppos on success.
 */
ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
	void __iomem *iomem = NULL;
	unsigned int rsrc;
	bool is_ioport;
	ssize_t done;

	if (!vdev->has_vga)
		return -EINVAL;

	if (pos > 0xbfffful)
		return -EINVAL;

	/* count is clamped so an access never crosses out of its range */
	switch ((u32)pos) {
	case 0xa0000 ... 0xbffff:
		count = min(count, (size_t)(0xc0000 - pos));
		iomem = ioremap(0xa0000, 0xbffff - 0xa0000 + 1);
		off = pos - 0xa0000;
		rsrc = VGA_RSRC_LEGACY_MEM;
		is_ioport = false;
		break;
	case 0x3b0 ... 0x3bb:
		count = min(count, (size_t)(0x3bc - pos));
		iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
		off = pos - 0x3b0;
		rsrc = VGA_RSRC_LEGACY_IO;
		is_ioport = true;
		break;
	case 0x3c0 ... 0x3df:
		count = min(count, (size_t)(0x3e0 - pos));
		iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
		off = pos - 0x3c0;
		rsrc = VGA_RSRC_LEGACY_IO;
		is_ioport = true;
		break;
	default:
		return -EINVAL;
	}

	if (!iomem)
		return -ENOMEM;

	/* Acquire legacy VGA routing from the arbiter for the duration */
	ret = vga_get_interruptible(vdev->pdev, rsrc);
	if (ret) {
		is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
		return ret;
	}

	/*
	 * VGA MMIO is a legacy, non-BAR resource that hopefully allows
	 * probing, so we don't currently worry about access in relation
	 * to the memory enable bit in the command register.
	 */
	done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite);

	vga_put(vdev->pdev, rsrc);

	is_ioport ? ioport_unmap(iomem) : iounmap(iomem);

	if (done >= 0)
		*ppos += done;

	return done;
}
359 #endif
360
/*
 * Perform the device write programmed into @ioeventfd, dispatching on
 * the access width.  Error returns from the iowrite helpers are ignored;
 * there is no path to report them back to the eventfd signaler.
 */
static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
					bool test_mem)
{
	struct vfio_pci_core_device *vdev = ioeventfd->vdev;
	void __iomem *addr = ioeventfd->addr;
	int bytes = ioeventfd->count;

	if (bytes == 1)
		vfio_pci_iowrite8(vdev, test_mem, ioeventfd->data, addr);
	else if (bytes == 2)
		vfio_pci_iowrite16(vdev, test_mem, ioeventfd->data, addr);
	else if (bytes == 4)
		vfio_pci_iowrite32(vdev, test_mem, ioeventfd->data, addr);
#ifdef iowrite64
	else if (bytes == 8)
		vfio_pci_iowrite64(vdev, test_mem, ioeventfd->data, addr);
#endif
}
385
/*
 * Fast-path ioeventfd handler.  Runs in a context where we must not
 * sleep, so the memory lock is only try-acquired; on contention we
 * return 1 to ask the virqfd core to re-run us from the sleepable
 * thread handler (vfio_pci_ioeventfd_thread).  Returns 0 when the
 * event has been fully handled here.
 */
static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
{
	struct vfio_pci_ioeventfd *ioeventfd = opaque;
	struct vfio_pci_core_device *vdev = ioeventfd->vdev;

	if (ioeventfd->test_mem) {
		if (!down_read_trylock(&vdev->memory_lock))
			return 1; /* Lock contended, use thread */
		if (!__vfio_pci_memory_enabled(vdev)) {
			/* Memory decode disabled: silently drop the write */
			up_read(&vdev->memory_lock);
			return 0;
		}
	}

	/*
	 * Memory check already done above under the lock we hold, so
	 * pass test_mem = false to avoid re-taking memory_lock.
	 */
	vfio_pci_ioeventfd_do_write(ioeventfd, false);

	if (ioeventfd->test_mem)
		up_read(&vdev->memory_lock);

	return 0;
}
407
/*
 * Sleepable thread-context fallback, invoked when the fast-path handler
 * returned 1 (memory_lock contended).  Here the write helpers may take
 * memory_lock themselves, so test_mem is passed through unchanged.
 */
static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
{
	struct vfio_pci_ioeventfd *ioeventfd = opaque;

	vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
}
414
/*
 * Create or destroy an ioeventfd: a mapping from an eventfd (@fd) to a
 * fixed-size write of @data at @offset within a BAR, performed whenever
 * the eventfd is signaled.  @fd == -1 tears down an existing ioeventfd
 * matching (offset, data, count); otherwise a duplicate registration
 * fails with -EEXIST.  Returns 0 on success or a negative errno.
 */
int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
		       uint64_t data, int count, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	loff_t pos = offset & VFIO_PCI_OFFSET_MASK;
	int ret, bar = VFIO_PCI_OFFSET_TO_INDEX(offset);
	struct vfio_pci_ioeventfd *ioeventfd;

	/* Only support ioeventfds into BARs */
	if (bar > VFIO_PCI_BAR5_REGION_INDEX)
		return -EINVAL;

	if (pos + count > pci_resource_len(pdev, bar))
		return -EINVAL;

	/* Disallow ioeventfds working around MSI-X table writes */
	if (bar == vdev->msix_bar &&
	    !(pos + count <= vdev->msix_offset ||
	      pos >= vdev->msix_offset + vdev->msix_size))
		return -EINVAL;

#ifndef iowrite64
	if (count == 8)
		return -EINVAL;
#endif

	ret = vfio_pci_setup_barmap(vdev, bar);
	if (ret)
		return ret;

	mutex_lock(&vdev->ioeventfds_lock);

	/* Look for an existing ioeventfd with the same trigger tuple */
	list_for_each_entry(ioeventfd, &vdev->ioeventfds_list, next) {
		if (ioeventfd->pos == pos && ioeventfd->bar == bar &&
		    ioeventfd->data == data && ioeventfd->count == count) {
			if (fd == -1) {
				/* fd == -1 requests teardown of the match */
				vfio_virqfd_disable(&ioeventfd->virqfd);
				list_del(&ioeventfd->next);
				vdev->ioeventfds_nr--;
				kfree(ioeventfd);
				ret = 0;
			} else
				ret = -EEXIST;

			goto out_unlock;
		}
	}

	/* Teardown (fd < 0) of something that doesn't exist */
	if (fd < 0) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (vdev->ioeventfds_nr >= VFIO_PCI_IOEVENTFD_MAX) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL);
	if (!ioeventfd) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ioeventfd->vdev = vdev;
	ioeventfd->addr = vdev->barmap[bar] + pos;
	ioeventfd->data = data;
	ioeventfd->pos = pos;
	ioeventfd->bar = bar;
	ioeventfd->count = count;
	/* Only MMIO BARs need the memory-decode check on each write */
	ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;

	ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
				 vfio_pci_ioeventfd_thread, NULL,
				 &ioeventfd->virqfd, fd);
	if (ret) {
		kfree(ioeventfd);
		goto out_unlock;
	}

	list_add(&ioeventfd->next, &vdev->ioeventfds_list);
	vdev->ioeventfds_nr++;

out_unlock:
	mutex_unlock(&vdev->ioeventfds_lock);

	return ret;
}
503