// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>

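/*
 * Write at most one aligned eight byte chunk: read the surrounding eight
 * bytes into a buffer, splice in up to "size" bytes from @src via an
 * EXECUTE'd MVC, and store the result back with STURG, which bypasses DAT
 * and page table write protection. Returns the number of bytes written.
 */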
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		"	bras	1,0f\n"			/* r1 = address of mvc template */
		"	mvc	0(1,%4),0(%5)\n"	/* template: copy src to tmp[offset] */
		"0:	mvc	0(8,%3),0(%0)\n"	/* read eight aligned bytes into tmp */
		"	ex	%1,0(1)\n"		/* execute template with length "count" */
		"	lg	%1,0(%3)\n"		/* load the modified eight bytes */
		"	lra	%0,0(%0)\n"		/* get the real address of dst */
		"	sturg	%1,%0\n"		/* store back, bypassing DAT */
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Since sturg always stores eight bytes at an eight byte boundary, this is
 * done as a read-modify-write sequence: in a loop, eight aligned bytes are
 * read from the destination, the requested bytes are spliced in, and the
 * result is written back.
 */
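/*
 * Hypothetical usage sketch (not part of this file): code patching, e.g.
 * replacing an instruction in write-protected kernel text:
 *
 *	u16 nop = 0x0700;	- "bcr 0,0", a two byte nop
 *
 *	s390_kernel_write(code_addr, &nop, sizeof(nop));
 */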
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	if (!(flags & PSW_MASK_DAT)) {
		/*
		 * On s390 the saved flags are the old PSW mask. If DAT was
		 * already off, page table protection does not apply and a
		 * plain memcpy is sufficient.
		 */
		memcpy(dst, src, size);
	} else {
		while (size) {
			copied = s390_kernel_write_odd(tmp, src, size);
			tmp += copied;
			src += copied;
			size -= copied;
		}
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}

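/*
 * Copy with MVCLE, which may complete only partially (condition code 3)
 * and is therefore retried in a loop. If the access faults, the exception
 * table entry resumes execution at label 2 with rc still set to -EFAULT;
 * on success rc is overwritten with zero.
 */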
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	union register_pair _dst, _src;
	int rc = -EFAULT;

	_dst.even = (unsigned long) dest;
	_dst.odd  = (unsigned long) count;
	_src.even = (unsigned long) src;
	_src.odd  = (unsigned long) count;
	asm volatile (
		"0:	mvcle	%[dst],%[src],0\n"
		"1:	jo	0b\n"
		"	lhi	%[rc],0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
		: : "cc", "memory");
	return rc;
}

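/*
 * Wrapper that disables interrupts and DAT around __memcpy_real(). DAT is
 * switched back on (if it was on before) prior to the hardirq tracing
 * calls; the previous PSW mask is restored at the end.
 */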
static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							unsigned long src,
							unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8);	/* disable DAT */
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04);	/* enable DAT */
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, unsigned long src, size_t count)
{
	unsigned long _dest = (unsigned long)dest;
	unsigned long _src = (unsigned long)src;
	unsigned long _count = (unsigned long)count;
	int rc;

	if (S390_lowcore.nodat_stack != 0) {
		preempt_disable();
		rc = call_on_stack(3, S390_lowcore.nodat_stack,
				   unsigned long, _memcpy_real,
				   unsigned long, _dest,
				   unsigned long, _src,
				   unsigned long, _count);
		preempt_enable();
		return rc;
	}
	/*
	 * This is a really early memcpy_real call, the stacks are
	 * not set up yet. Just call _memcpy_real on the early boot
	 * stack.
	 */
	return _memcpy_real(_dest, _src, _count);
}

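/*
 * Hypothetical example for memcpy_real() (not part of this file): dump
 * code reading memory of the previous kernel through its real address:
 *
 *	if (memcpy_real(buf, oldmem_addr, len))
 *		return -EFAULT;
 */
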
/*
 * Copy memory in absolute mode (kernel to kernel): the copy runs with the
 * prefix register temporarily set to zero, so absolute addresses are used
 * regardless of this CPU's prefix area.
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}

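/*
 * Hypothetical example for memcpy_absolute() (not part of this file):
 * update a field in the absolute zero page, no matter which CPU's prefix
 * area currently maps absolute address zero:
 *
 *	memcpy_absolute(&S390_lowcore.restart_data, &data, sizeof(data));
 */
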
/*
 * Copy memory from kernel (real) to user (virtual). Since the real mode
 * copy runs with DAT disabled, it cannot store to user virtual addresses;
 * the data is therefore staged through a bounce page.
 */
int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

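/*
 * Hypothetical example for copy_to_user_real() (not part of this file):
 * a dump pseudo-file handing real memory out to a user buffer:
 *
 *	if (copy_to_user_real(ubuf, mem_start + *ppos, len))
 *		return -EFAULT;
 */
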
/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	unsigned long size;

	cpus_read_lock();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, ptr, size);
	}
	preempt_enable();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}