/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) to avoid writing the same thing
 * over and over again with slight variations and possibly making a
 * mistake somewhere.
 */
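
/*
 * Example (illustrative sketch only; the port number below is a placeholder,
 * not something this header defines):
 *
 *      u8 status;
 *
 *      status = inb(0x64);     // read one byte from an I/O port
 *      outb(0xd1, 0x64);       // write one byte to an I/O port
 *      outb_p(0xd1, 0x64);     // same, followed by a small I/O delay
 */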

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *              Linus
 */

/*
 * A bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/cc_platform.h>
#include <asm/page.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable_types.h>
#include <asm/shared/io.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
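
/*
 * For reference, build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
 * above expands to roughly the following (a sketch of the expansion, not an
 * additional definition):
 *
 *      static inline unsigned char readb(const volatile void __iomem *addr)
 *      {
 *              unsigned char ret;
 *              asm volatile("movb %1,%0"
 *                           : "=q" (ret)
 *                           : "m" (*(volatile unsigned char __force *)addr)
 *                           : "memory");
 *              return ret;
 *      }
 */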

#define readb readb
#define readw readw
#define readl readl
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define writeb writeb
#define writew writew
#define writel writel
#define writeb_relaxed(v, a) __writeb(v, a)
#define writew_relaxed(v, a) __writew(v, a)
#define writel_relaxed(v, a) __writel(v, a)
#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel
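
/*
 * Example of choosing between the variants (illustrative sketch; "regs" is a
 * hypothetical ioremap()ed register block). Per the definitions above, the
 * only difference here is the "memory" clobber: the _relaxed/__raw forms
 * omit the compiler barrier.
 *
 *      u32 v;
 *
 *      v = readl(regs + 0x10);         // ordered MMIO read
 *      v = readl_relaxed(regs + 0x10); // no compiler barrier
 *      writel_relaxed(v, regs + 0x14); // likewise for the store
 */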

#ifdef CONFIG_X86_64

build_mmio_read(readq, "q", u64, "=r", :"memory")
build_mmio_read(__readq, "q", u64, "=r", )
build_mmio_write(writeq, "q", u64, "r", :"memory")
build_mmio_write(__writeq, "q", u64, "r", )

#define readq_relaxed(a) __readq(a)
#define writeq_relaxed(v, a) __writeq(v, a)

#define __raw_readq __readq
#define __raw_writeq __writeq

/* Let people know that we have them */
#define readq readq
#define writeq writeq

#endif

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
        return __pa(address);
}
#define virt_to_phys virt_to_phys
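
/*
 * Example (illustrative sketch; "buf" is a hypothetical kmalloc() allocation,
 * which is one of the address classes the comment above allows here):
 *
 *      void *buf = kmalloc(64, GFP_KERNEL);
 *      phys_addr_t pa = virt_to_phys(buf);
 */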

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline void *phys_to_virt(phys_addr_t address)
{
        return __va(address);
}
#define phys_to_virt phys_to_virt
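
/*
 * Example (illustrative sketch, continuing the one above): phys_to_virt() is
 * the inverse mapping for directly-mapped memory:
 *
 *      void *va = phys_to_virt(pa);    // va == buf for the kmalloc() case
 */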

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
 */
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
        return (unsigned int)virt_to_phys(address);
}
#define isa_bus_to_virt phys_to_virt

/*
 * The default ioremap() behavior is non-cached; if you need something
 * else, you probably want one of the following.
 */
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
#define ioremap_cache ioremap_cache
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
#define ioremap_prot ioremap_prot
extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
#define ioremap_encrypted ioremap_encrypted

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
void __iomem *ioremap(resource_size_t offset, unsigned long size);
#define ioremap ioremap
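
/*
 * Typical usage (illustrative sketch; "bar_phys"/"bar_len" stand in for a
 * device MMIO resource, e.g. from pci_resource_start()/pci_resource_len(),
 * and CTRL_REG is a hypothetical register offset):
 *
 *      void __iomem *regs = ioremap(bar_phys, bar_len);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + CTRL_REG);
 *      iounmap(regs);
 */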

extern void iounmap(volatile void __iomem *addr);
#define iounmap iounmap

#ifdef __KERNEL__

void memcpy_fromio(void *, const volatile void __iomem *, size_t);
void memcpy_toio(volatile void __iomem *, const void *, size_t);
void memset_io(volatile void __iomem *, int, size_t);

#define memcpy_fromio memcpy_fromio
#define memcpy_toio memcpy_toio
#define memset_io memset_io
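
/*
 * Example (illustrative sketch; "regs" is a hypothetical ioremap()ed area and
 * the offsets are placeholders):
 *
 *      u8 fw_version[16];
 *
 *      memcpy_fromio(fw_version, regs + 0x100, sizeof(fw_version));
 *      memset_io(regs + 0x200, 0, 0x40);
 */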

/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))

#endif /* __KERNEL__ */

extern void native_io_delay(void);

extern int io_delay_type;
extern void io_delay_init(void);

#if defined(CONFIG_PARAVIRT)
#include <asm/paravirt.h>
#else

static inline void slow_down_io(void)
{
        native_io_delay();
#ifdef REALLY_SLOW_IO
        native_io_delay();
        native_io_delay();
        native_io_delay();
#endif
}

#endif

#define BUILDIO(bwl, bw, type) \
static inline void out##bwl##_p(type value, u16 port) \
{ \
        out##bwl(value, port); \
        slow_down_io(); \
} \
 \
static inline type in##bwl##_p(u16 port) \
{ \
        type value = in##bwl(port); \
        slow_down_io(); \
        return value; \
} \
 \
static inline void outs##bwl(u16 port, const void *addr, unsigned long count) \
{ \
        if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \
                type *value = (type *)addr; \
                while (count) { \
                        out##bwl(*value, port); \
                        value++; \
                        count--; \
                } \
        } else { \
                asm volatile("rep; outs" #bwl \
                             : "+S"(addr), "+c"(count) \
                             : "d"(port) : "memory"); \
        } \
} \
 \
static inline void ins##bwl(u16 port, void *addr, unsigned long count) \
{ \
        if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \
                type *value = (type *)addr; \
                while (count) { \
                        *value = in##bwl(port); \
                        value++; \
                        count--; \
                } \
        } else { \
                asm volatile("rep; ins" #bwl \
                             : "+D"(addr), "+c"(count) \
                             : "d"(port) : "memory"); \
        } \
}

BUILDIO(b, b, u8)
BUILDIO(w, w, u16)
BUILDIO(l, , u32)
#undef BUILDIO
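
/*
 * For reference, BUILDIO(b, b, u8) above generates the following interfaces
 * (a sketch of the resulting signatures, not separate declarations); the
 * w/l invocations do the same for 16- and 32-bit accesses:
 *
 *      static inline void outb_p(u8 value, u16 port);
 *      static inline u8 inb_p(u16 port);
 *      static inline void outsb(u16 port, const void *addr, unsigned long count);
 *      static inline void insb(u16 port, void *addr, unsigned long count);
 */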

#define inb_p inb_p
#define inw_p inw_p
#define inl_p inl_p
#define insb insb
#define insw insw
#define insl insl

#define outb_p outb_p
#define outw_p outw_p
#define outl_p outl_p
#define outsb outsb
#define outsw outsw
#define outsl outsl
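
/*
 * Example (illustrative sketch; the legacy CMOS/RTC index and data ports
 * 0x70/0x71 and the ATA data port 0x1f0 are used purely as placeholders):
 *
 *      u8 sec;
 *      u16 sector[256];
 *
 *      outb_p(0x00, 0x70);             // select a register, with I/O delay
 *      sec = inb_p(0x71);              // read it back, with I/O delay
 *      insw(0x1f0, sector, 256);       // string read: 256 words from one port
 */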

extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

#define xlate_dev_mem_ptr xlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               enum page_cache_mode pcm);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
#define ioremap_wc ioremap_wc
extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
#define ioremap_wt ioremap_wt

extern bool is_early_ioremap_ptep(pte_t *ptep);

#define IO_SPACE_LIMIT 0xffff

#include <asm-generic/io.h>
#undef PCI_IOBASE

#ifdef CONFIG_MTRR
extern int __must_check arch_phys_wc_index(int handle);
#define arch_phys_wc_index arch_phys_wc_index

extern int __must_check arch_phys_wc_add(unsigned long base,
                                         unsigned long size);
extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif

#ifdef CONFIG_X86_PAT
extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
extern bool arch_memremap_can_ram_remap(resource_size_t offset,
                                        unsigned long size,
                                        unsigned long flags);
#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap

extern bool phys_mem_access_encrypted(unsigned long phys_addr,
                                      unsigned long size);
#else
static inline bool phys_mem_access_encrypted(unsigned long phys_addr,
                                             unsigned long size)
{
        return true;
}
#endif

/**
 * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
 * @dst: destination, in MMIO space (must be 512-bit aligned)
 * @src: source
 * @count: number of 512-bit quantities to submit
 *
 * Submit data from kernel space to MMIO space, in units of 512 bits at a
 * time. Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 *
 * Warning: Do not use this helper unless your driver has checked that the CPU
 * instruction is supported on the platform.
 */
static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
                                    size_t count)
{
        const u8 *from = src;
        const u8 *end = from + count * 64;

        while (from < end) {
                movdir64b(dst, from);
                from += 64;
        }
}
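
/*
 * Example (illustrative sketch; "wq_portal" is a hypothetical 512-bit aligned
 * MMIO submission window and "desc" points to a 64-byte command descriptor,
 * in the style of drivers that have already verified MOVDIR64B support):
 *
 *      iosubmit_cmds512(wq_portal, desc, 1);   // submit one 64-byte command
 */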

#endif /* _ASM_X86_IO_H */