/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

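/*
 * Walk the page tables for a user address and return a pointer to the
 * pte. On failure a small fake "pointer" (< 0x1000) is returned instead;
 * its value is the DAT program interruption code (e.g. 0x11 for a page
 * translation exception) and is later handed to __handle_fault().
 */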
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return (pte_t *) 0x3a;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return (pte_t *) 0x3b;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return (pte_t *) 0x10;

	return pte_offset_map(pmd, addr);
}

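/*
 * Copy n bytes between kernel space and a user address by walking the
 * user page tables directly. The copy is done page-wise while holding
 * mm->page_table_lock; if a page is not present or write protected, the
 * lock is dropped, __handle_fault() tries to resolve the fault, and the
 * copy is retried. Returns the number of bytes that could not be copied.
 */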
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		} else if (write_user && !pte_write(*pte)) {
			pte = (pte_t *) 0x04;
			goto fault;
		}

		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held;
 * note that the lock is dropped and reacquired while a fault is handled.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn;
	pte_t *pte;
	int rc;

retry:
	pte = follow_table(mm, uaddr);
	if ((unsigned long) pte < 0x1000)
		goto fault;
	if (!pte_present(*pte)) {
		pte = (pte_t *) 0x11;
		goto fault;
	}

	pfn = pte_pfn(*pte);
	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}

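/*
 * With the kernel address space a plain memcpy is sufficient; otherwise
 * fall back to the page table walk. Any bytes that could not be copied
 * are zeroed in the kernel destination buffer.
 */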
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

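/* Same as copy_from_user_pt(), but with the copy direction reversed. */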
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

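/*
 * Clear n bytes of user memory by "copying" from empty_zero_page, one
 * page-sized chunk at a time. Returns the number of bytes not cleared.
 */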
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				      &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}

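/*
 * Determine the length of a user space string including the terminating
 * '\0', examining at most count bytes. Returns 0 if a fault cannot be
 * resolved.
 */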
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		}

		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE-1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, 0))
		return 0;
	goto retry;
}

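/*
 * Copy a '\0' terminated string from user space. Returns the string
 * length without the terminator, count if no terminator was found
 * within count bytes, or -EFAULT on an unresolvable fault.
 */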
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}

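/*
 * Copy data from one user address to another. Both the source and the
 * destination page must be resolved before each chunk is copied, so the
 * chunk size is bounded by whichever page boundary comes first (hence
 * the max of the two page offsets).
 */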
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pte_t *pte_from, *pte_to;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		pte_from = follow_table(mm, uaddr_from);
		error_code = (unsigned long) pte_from;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_from)) {
			error_code = 0x11;
			goto fault;
		}

		write_user = 1;
		uaddr = uaddr_to;
		pte_to = follow_table(mm, uaddr_to);
		error_code = (unsigned long) pte_to;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_to)) {
			error_code = 0x11;
			goto fault;
		} else if (!pte_write(*pte_to)) {
			error_code = 0x04;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		pfn_to = pte_pfn(*pte_to);
		offset_from = uaddr_from & (PAGE_SIZE-1);
		offset_to = uaddr_to & (PAGE_SIZE-1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, error_code, write_user))
		return n - done;
	goto retry;
}

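/*
 * Emulate an atomic futex operation with a load / modify / compare-and-
 * swap loop: load the old value, compute the new value with "insn", and
 * let cs store it only if the futex word still holds the old value,
 * retrying otherwise. The EX_TABLE entries turn a fault at any of the
 * access points into a -EFAULT return value.
 */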
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

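/*
 * Carry out the futex operation selected by op and return the previous
 * value of the futex word in *old.
 */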
static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

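/*
 * Translate the user address with the page table lock held and take a
 * reference on the page so that it cannot go away while the atomic
 * operation runs on the kernel mapping of the futex word.
 */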
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}

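/*
 * Atomically replace the futex word with newval if it still contains
 * oldval, using compare-and-swap. The value actually found is returned
 * in *uval; a fault yields -EFAULT via the exception table.
 */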
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs   %1,%4,0(%5)\n"
		     "1: la   %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}

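/* As above, but with the user address translated and the page pinned first. */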
int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}

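/*
 * Operation vector that wires the page table walk based helpers into
 * the common s390 uaccess interface.
 */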
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};