// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>
#include <linux/xattr.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "xattr.h"

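/*
 * Per-request state for the xattr opcodes: the target file (for the
 * f*xattr variants), the shared name/value/flags context consumed by
 * do_getxattr()/do_setxattr(), and the resolved pathname (for the
 * path-based variants).
 */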
struct io_xattr {
	struct file			*file;
	struct xattr_ctx		ctx;
	struct filename			*filename;
};

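/*
 * Release everything the prep path allocated: the pathname reference
 * (if any), the copied attribute name and the kernel-side value buffer.
 */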
void io_xattr_cleanup(struct io_kiocb *req)
{
	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);

	if (ix->filename)
		putname(ix->filename);

	kfree(ix->ctx.kname);
	kvfree(ix->ctx.kvalue);
}

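/*
 * Common completion path: cleanup is done here, so clear
 * REQ_F_NEED_CLEANUP to keep the core from calling io_xattr_cleanup()
 * a second time, then post the result.
 */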
static void io_xattr_finish(struct io_kiocb *req, int ret)
{
	req->flags &= ~REQ_F_NEED_CLEANUP;

	io_xattr_cleanup(req);
	io_req_set_res(req, ret, 0);
}

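/*
 * Shared prep for GETXATTR/FGETXATTR: pull the attribute name (addr),
 * the user value buffer (addr2), its size (len) and the flags (which
 * must be zero for a get) out of the SQE, copy the name into kernel
 * memory and force async issue, since the VFS xattr helpers may block.
 */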
static int __io_getxattr_prep(struct io_kiocb *req,
			      const struct io_uring_sqe *sqe)
{
	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
	const char __user *name;
	int ret;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	ix->filename = NULL;
	ix->ctx.kvalue = NULL;
	name = u64_to_user_ptr(READ_ONCE(sqe->addr));
	ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	ix->ctx.size = READ_ONCE(sqe->len);
	ix->ctx.flags = READ_ONCE(sqe->xattr_flags);

	if (ix->ctx.flags)
		return -EINVAL;

	ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL);
	if (!ix->ctx.kname)
		return -ENOMEM;

	ret = strncpy_from_user(ix->ctx.kname->name, name,
				sizeof(ix->ctx.kname->name));
	if (!ret || ret == sizeof(ix->ctx.kname->name))
		ret = -ERANGE;
	if (ret < 0) {
		kfree(ix->ctx.kname);
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

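/* FGETXATTR operates on the request's already-open file. */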
int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_getxattr_prep(req, sqe);
}

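/*
 * GETXATTR additionally takes a pathname in addr3; grab a struct
 * filename reference now so the path can be resolved at issue time.
 */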
int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
	const char __user *path;
	int ret;

	ret = __io_getxattr_prep(req, sqe);
	if (ret)
		return ret;

	path = u64_to_user_ptr(READ_ONCE(sqe->addr3));

	ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
	if (IS_ERR(ix->filename)) {
		ret = PTR_ERR(ix->filename);
		ix->filename = NULL;
	}

	return ret;
}

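/* Fetch the attribute from the file backing this request. */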
int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = do_getxattr(mnt_idmap(req->file->f_path.mnt),
			req->file->f_path.dentry,
			&ix->ctx);

	io_xattr_finish(req, ret);
	return IOU_OK;
}

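/*
 * Path-based getxattr: resolve the pathname prepared earlier, retrying
 * with LOOKUP_REVAL if the lookup result turned out to be stale.
 */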
int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
	unsigned int lookup_flags = LOOKUP_FOLLOW;
	struct path path;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

retry:
	ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
	if (!ret) {
		ret = do_getxattr(mnt_idmap(path.mnt), path.dentry, &ix->ctx);

		path_put(&path);
		if (retry_estale(ret, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}

	io_xattr_finish(req, ret);
	return IOU_OK;
}

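/*
 * Shared prep for SETXATTR/FSETXATTR: setxattr_copy() copies both the
 * attribute name and the value into kernel memory, so the operation no
 * longer depends on the submitter's address space at issue time.
 */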
static int __io_setxattr_prep(struct io_kiocb *req,
			const struct io_uring_sqe *sqe)
{
	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
	const char __user *name;
	int ret;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	ix->filename = NULL;
	name = u64_to_user_ptr(READ_ONCE(sqe->addr));
	ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	ix->ctx.kvalue = NULL;
	ix->ctx.size = READ_ONCE(sqe->len);
	ix->ctx.flags = READ_ONCE(sqe->xattr_flags);

	ix->ctx.kname = kmalloc(sizeof(*ix->ctx.kname), GFP_KERNEL);
	if (!ix->ctx.kname)
		return -ENOMEM;

	ret = setxattr_copy(name, &ix->ctx);
	if (ret) {
		kfree(ix->ctx.kname);
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

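/* SETXATTR additionally takes a pathname in addr3. */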
int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
	const char __user *path;
	int ret;

	ret = __io_setxattr_prep(req, sqe);
	if (ret)
		return ret;

	path = u64_to_user_ptr(READ_ONCE(sqe->addr3));

	ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
	if (IS_ERR(ix->filename)) {
		ret = PTR_ERR(ix->filename);
		ix->filename = NULL;
	}

	return ret;
}

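/* FSETXATTR operates on the request's already-open file. */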
int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_setxattr_prep(req, sqe);
}

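/*
 * Perform the actual set: write access to the mount is taken around
 * do_setxattr().
 */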
static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags,
			const struct path *path)
{
	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
	int ret;

	ret = mnt_want_write(path->mnt);
	if (!ret) {
		ret = do_setxattr(mnt_idmap(path->mnt), path->dentry, &ix->ctx);
		mnt_drop_write(path->mnt);
	}

	return ret;
}

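/* Set the attribute on the file backing this request. */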
int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = __io_setxattr(req, issue_flags, &req->file->f_path);
	io_xattr_finish(req, ret);
	return IOU_OK;
}

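/*
 * Path-based setxattr: resolve the prepared pathname, retrying with
 * LOOKUP_REVAL on a stale result, then set the attribute.
 */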
int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
	unsigned int lookup_flags = LOOKUP_FOLLOW;
	struct path path;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

retry:
	ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
	if (!ret) {
		ret = __io_setxattr(req, issue_flags, &path);
		path_put(&path);
		if (retry_estale(ret, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}

	io_xattr_finish(req, ret);
	return IOU_OK;
}