1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_VIRTIO_PCI_MODERN_H
3 #define _LINUX_VIRTIO_PCI_MODERN_H
4
5 #include <linux/pci.h>
6 #include <linux/virtio_pci.h>
7
8 struct virtio_pci_modern_device {
9 struct pci_dev *pci_dev;
10
11 struct virtio_pci_common_cfg __iomem *common;
12 /* Device-specific data (non-legacy mode) */
13 void __iomem *device;
14 /* Base of vq notifications (non-legacy mode). */
15 void __iomem *notify_base;
16 /* Physical base of vq notifications */
17 resource_size_t notify_pa;
18 /* Where to read and clear interrupt */
19 u8 __iomem *isr;
20
21 /* So we can sanity-check accesses. */
22 size_t notify_len;
23 size_t device_len;
24
25 /* Capability for when we need to map notifications per-vq. */
26 int notify_map_cap;
27
28 /* Multiply queue_notify_off by this value. (non-legacy mode). */
29 u32 notify_offset_multiplier;
30
31 int modern_bars;
32
33 struct virtio_device_id id;
34 };
35
/*
 * Type-safe wrappers for io accesses.
 * Use these to enforce at compile time the following spec requirement:
 *
 * The driver MUST access each field using the “natural” access
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
/* Read an 8-bit field; single bytes need no endianness handling. */
static inline u8 vp_ioread8(const u8 __iomem *addr)
{
	return ioread8(addr);
}
vp_ioread16(const __le16 __iomem * addr)48 static inline u16 vp_ioread16 (const __le16 __iomem *addr)
49 {
50 return ioread16(addr);
51 }
52
vp_ioread32(const __le32 __iomem * addr)53 static inline u32 vp_ioread32(const __le32 __iomem *addr)
54 {
55 return ioread32(addr);
56 }
57
/* Write an 8-bit field; single bytes need no endianness handling. */
static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
{
	iowrite8(value, addr);
}
62
/* Write a 16-bit field; iowrite16 stores in little-endian order. */
static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{
	iowrite16(value, addr);
}
67
/* Write a 32-bit field; iowrite32 stores in little-endian order. */
static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{
	iowrite32(value, addr);
}
72
/*
 * Write a 64-bit value as two 32-bit halves, low word first, since the
 * spec mandates natural (32-bit) accesses for 32-bit register pairs.
 */
static inline void vp_iowrite64_twopart(u64 val,
					__le32 __iomem *lo,
					__le32 __iomem *hi)
{
	vp_iowrite32((u32)val, lo);
	vp_iowrite32(val >> 32, hi);
}
80
81 u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
82 u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev);
83 void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
84 u64 features);
85 u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
86 u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
87 void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
88 u8 status);
89 u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
90 u16 idx, u16 vector);
91 u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
92 u16 vector);
93 void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
94 u16 index, u64 desc_addr, u64 driver_addr,
95 u64 device_addr);
96 void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
97 u16 idx, bool enable);
98 bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
99 u16 idx);
100 void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
101 u16 idx, u16 size);
102 u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
103 u16 idx);
104 u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
105 void __iomem * vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
106 u16 index, resource_size_t *pa);
107 int vp_modern_probe(struct virtio_pci_modern_device *mdev);
108 void vp_modern_remove(struct virtio_pci_modern_device *mdev);
109 #endif
110