/*
 * arch/s390/lib/uaccess_mvcos.c
 *
 * Optimized user space access functions based on mvcos.
 *
 * Copyright (C) IBM Corp. 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *            Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

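/*
 * Select the 31-bit or 64-bit flavour of the arithmetic, compare and
 * load mnemonics used in the inline assembly below, so that the same
 * asm templates work for both build modes.
 */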
#ifndef __s390x__
#define AHI "ahi"
#define ALR "alr"
#define CLR "clr"
#define LHI "lhi"
#define SLR "slr"
#else
#define AHI "aghi"
#define ALR "algr"
#define CLR "clgr"
#define LHI "lghi"
#define SLR "slgr"
#endif

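/*
 * Copy from user space with MVCOS (move with optional specifications).
 * Register 0 carries the operand access controls: 0x81 in the low
 * halfword selects the secondary (user) address space for the source
 * operand, the destination is accessed normally.  MVCOS moves at most
 * 4096 bytes per execution, hence the loop.  On a fault the fixup code
 * retries up to the next page boundary, clears the remainder of the
 * destination buffer and returns the number of bytes not copied.
 */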
static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
{
        register unsigned long reg0 asm("0") = 0x81UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
                "9: jz 7f\n"
                "1:"ALR" %0,%3\n"
                " "SLR" %1,%3\n"
                " "SLR" %2,%3\n"
                " j 0b\n"
                "2: la %4,4095(%1)\n"   /* %4 = ptr + 4095 */
                " nr %4,%3\n"           /* %4 = (ptr + 4095) & -4096 */
                " "SLR" %4,%1\n"
                " "CLR" %0,%4\n"        /* copy crosses next page boundary? */
                " jnh 4f\n"
                "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
                "10:"SLR" %0,%4\n"
                " "ALR" %2,%4\n"
                "4:"LHI" %4,-1\n"
                " "ALR" %4,%0\n"        /* copy remaining size, subtract 1 */
                " bras %3,6f\n"         /* memset loop */
                " xc 0(1,%2),0(%2)\n"
                "5: xc 0(256,%2),0(%2)\n"
                " la %2,256(%2)\n"
                "6:"AHI" %4,-256\n"
                " jnm 5b\n"
                " ex %4,0(%3)\n"
                " j 8f\n"
                "7:"SLR" %0,%0\n"
                "8: \n"
                EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
                : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}

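/*
 * Copies of 256 bytes or less take the standard path
 * (copy_from_user_std), larger copies go through MVCOS.
 */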
static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
{
        if (size <= 256)
                return copy_from_user_std(size, ptr, x);
        return copy_from_user_mvcos(size, ptr, x);
}

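/*
 * Copy to user space with MVCOS.  Here 0x81 sits in the high halfword
 * of register 0, i.e. in the access control for the destination
 * operand, so the destination is accessed in the secondary (user)
 * address space while the source is read normally.  On a fault the
 * fixup retries up to the next page boundary and returns the number
 * of bytes not copied.
 */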
static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
{
        register unsigned long reg0 asm("0") = 0x810000UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
                "6: jz 4f\n"
                "1:"ALR" %0,%3\n"
                " "SLR" %1,%3\n"
                " "SLR" %2,%3\n"
                " j 0b\n"
                "2: la %4,4095(%1)\n"   /* %4 = ptr + 4095 */
                " nr %4,%3\n"           /* %4 = (ptr + 4095) & -4096 */
                " "SLR" %4,%1\n"
                " "CLR" %0,%4\n"        /* copy crosses next page boundary? */
                " jnh 5f\n"
                "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
                "7:"SLR" %0,%4\n"
                " j 5f\n"
                "4:"SLR" %0,%0\n"
                "5: \n"
                EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
                : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}

static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
                                       const void *x)
{
        if (size <= 256)
                return copy_to_user_std(size, ptr, x);
        return copy_to_user_mvcos(size, ptr, x);
}

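/*
 * Copy within user space: 0x810081 applies the user space access
 * control to both operands.  On a fault the remaining byte count is
 * returned without any page-wise retry (see the FIXME below).
 */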
static size_t copy_in_user_mvcos(size_t size, void __user *to,
                                 const void __user *from)
{
        register unsigned long reg0 asm("0") = 0x810081UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;
        /* FIXME: copy with reduced length. */
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
                " jz 2f\n"
                "1:"ALR" %0,%3\n"
                " "SLR" %1,%3\n"
                " "SLR" %2,%3\n"
                " j 0b\n"
                "2:"SLR" %0,%0\n"
                "3: \n"
                EX_TABLE(0b,3b)
                : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return size;
}

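/*
 * Clear user memory by copying from empty_zero_page with MVCOS, using
 * the same page boundary retry logic as copy_to_user_mvcos.  Returns
 * the number of bytes that could not be cleared.
 */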
static size_t clear_user_mvcos(size_t size, void __user *to)
{
        register unsigned long reg0 asm("0") = 0x810000UL;
        unsigned long tmp1, tmp2;

        tmp1 = -4096UL;
        asm volatile(
                "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
                " jz 4f\n"
                "1:"ALR" %0,%2\n"
                " "SLR" %1,%2\n"
                " j 0b\n"
                "2: la %3,4095(%1)\n"   /* %3 = to + 4095 */
                " nr %3,%2\n"           /* %3 = (to + 4095) & -4096 */
                " "SLR" %3,%1\n"
                " "CLR" %0,%3\n"        /* copy crosses next page boundary? */
                " jnh 5f\n"
                "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
                " "SLR" %0,%3\n"
                " j 5f\n"
                "4:"SLR" %0,%0\n"
                "5: \n"
                EX_TABLE(0b,2b) EX_TABLE(3b,5b)
                : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
                : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
        return size;
}

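/*
 * Determine the length of a user space string including the
 * terminating NUL byte.  The string is fetched into a kernel buffer in
 * chunks of up to 256 bytes and scanned with strnlen(); if a chunk
 * cannot be read at all, 0 is returned.
 */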
static size_t strnlen_user_mvcos(size_t count, const char __user *src)
{
        char buf[256];
        int rc;
        size_t done, len, len_str;

        done = 0;
        do {
                len = min(count - done, (size_t) 256);
                rc = uaccess.copy_from_user(len, src + done, buf);
                if (unlikely(rc == len))
                        return 0;
                len -= rc;
                len_str = strnlen(buf, len);
                done += len_str;
        } while ((len_str == len) && (done < count));
        return done + 1;
}

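/*
 * Copy a NUL terminated string from user space, at most count bytes,
 * fetched in chunks of up to 4096 bytes.  Returns the length of the
 * copied string (excluding the terminator) or -EFAULT if a chunk could
 * not be read at all.
 */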
static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
                                      char *dst)
{
        int rc;
        size_t done, len, len_str;

        done = 0;
        do {
                len = min(count - done, (size_t) 4096);
                rc = uaccess.copy_from_user(len, src + done, dst + done);
                if (unlikely(rc == len))
                        return -EFAULT;
                len -= rc;
                len_str = strnlen(dst + done, len);
                done += len_str;
        } while ((len_str == len) && (done < count));
        return done;
}

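/*
 * Default operation set: large copies use MVCOS, small copies and the
 * string/futex helpers use the standard routines.
 */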
struct uaccess_ops uaccess_mvcos = {
        .copy_from_user = copy_from_user_mvcos_check,
        .copy_from_user_small = copy_from_user_std,
        .copy_to_user = copy_to_user_mvcos_check,
        .copy_to_user_small = copy_to_user_std,
        .copy_in_user = copy_in_user_mvcos,
        .clear_user = clear_user_mvcos,
        .strnlen_user = strnlen_user_std,
        .strncpy_from_user = strncpy_from_user_std,
        .futex_atomic_op = futex_atomic_op_std,
        .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
};

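/*
 * Alternate operation set: every copy and string helper goes through
 * MVCOS and the futex operations use the page table walking (_pt)
 * variants.
 */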
struct uaccess_ops uaccess_mvcos_switch = {
        .copy_from_user = copy_from_user_mvcos,
        .copy_from_user_small = copy_from_user_mvcos,
        .copy_to_user = copy_to_user_mvcos,
        .copy_to_user_small = copy_to_user_mvcos,
        .copy_in_user = copy_in_user_mvcos,
        .clear_user = clear_user_mvcos,
        .strnlen_user = strnlen_user_mvcos,
        .strncpy_from_user = strncpy_from_user_mvcos,
        .futex_atomic_op = futex_atomic_op_pt,
        .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};