/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */

#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

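/*
 * Low-level futex helpers, implemented out of line in the architecture's
 * atomic support code.  As used below, each returns a struct __get_user
 * whose .err field holds an error code (zero on success) and whose .val
 * field holds the previous value of the futex word.
 */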
extern struct __get_user futex_set(u32 __user *v, int i);
extern struct __get_user futex_add(u32 __user *v, int n);
extern struct __get_user futex_or(u32 __user *v, int n);
extern struct __get_user futex_andn(u32 __user *v, int n);
extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);

#ifndef __tilegx__
extern struct __get_user futex_xor(u32 __user *v, int n);
#else
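/*
 * There is no dedicated xor helper on tilegx, so emulate it with a
 * cmpxchg loop: read the current value, xor in the argument, and retry
 * the compare-and-exchange until it succeeds or faults.
 */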
static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
{
	struct __get_user asm_ret = __get_user_4(uaddr);
	if (!asm_ret.err) {
		int oldval, newval;
		do {
			oldval = asm_ret.val;
			newval = oldval ^ n;
			asm_ret = futex_cmpxchg(uaddr, oldval, newval);
		} while (asm_ret.err == 0 && oldval != asm_ret.val);
	}
	return asm_ret;
}
#endif

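/*
 * Perform the operation packed into @encoded_op on the futex word at
 * @uaddr.  The encoding carries the operation, the comparison, and their
 * two arguments in a single int; on success the return value is the
 * result of comparing the old value of the futex word against the
 * comparison argument, otherwise a negative error code.
 */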
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int ret;
	struct __get_user asm_ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

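	/*
	 * Disable page faults so that a fault while accessing the user
	 * address is reported back via asm_ret.err rather than being
	 * serviced here, where we must not sleep.
	 */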
	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		asm_ret = futex_set(uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		asm_ret = futex_add(uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		asm_ret = futex_or(uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		asm_ret = futex_andn(uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		asm_ret = futex_xor(uaddr, oparg);
		break;
	default:
		asm_ret.err = -ENOSYS;
	}
	pagefault_enable();

	ret = asm_ret.err;

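	/*
	 * The atomic helper succeeded; compute the return value by
	 * comparing the old value of the futex word against cmparg.
	 */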
	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (asm_ret.val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (asm_ret.val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (asm_ret.val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (asm_ret.val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (asm_ret.val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (asm_ret.val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}

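/*
 * Atomically compare the futex word at @uaddr with @oldval and, if they
 * match, replace it with @newval.  The value read from the word is
 * stored through @uval; the return value is zero on success or a
 * negative error code if the word could not be accessed.
 */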
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
						u32 oldval, u32 newval)
{
	struct __get_user asm_ret;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	asm_ret = futex_cmpxchg(uaddr, oldval, newval);
	*uval = asm_ret.val;
	return asm_ret.err;
}

#ifndef __tilegx__
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */