// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"

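/*
 * Called when the daemon closes the anonymous fd: mark the object's fd as
 * closed, fail any READ requests still waiting on it, and drop the
 * references taken when the fd was installed.
 */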
static int cachefiles_ondemand_fd_release(struct inode *inode,
					  struct file *file)
{
	struct cachefiles_object *object = file->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	int object_id = object->ondemand_id;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, 0);

	xa_lock(&cache->reqs);
	object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;

	/*
	 * Flush all pending READ requests since their completion depends on
	 * anon_fd.
	 */
	xas_for_each(&xas, req, ULONG_MAX) {
		if (req->msg.object_id == object_id &&
		    req->msg.opcode == CACHEFILES_OP_READ) {
			req->error = -EIO;
			complete(&req->done);
			xas_store(&xas, NULL);
		}
	}
	xa_unlock(&cache->reqs);

	xa_erase(&cache->ondemand_ids, object_id);
	trace_cachefiles_ondemand_fd_release(object, object_id);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	cachefiles_put_unbind_pincount(cache);
	return 0;
}

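/*
 * The daemon writes fetched data into the backing cache file through the
 * anonymous fd; the write range is prepared under the cache's credentials
 * before the actual write is issued.
 */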
static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
						 struct iov_iter *iter)
{
	struct cachefiles_object *object = kiocb->ki_filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file = object->file;
	size_t len = iter->count;
	loff_t pos = kiocb->ki_pos;
	const struct cred *saved_cred;
	int ret;

	if (!file)
		return -ENOBUFS;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
	cachefiles_end_secure(cache, saved_cred);
	if (ret < 0)
		return ret;

	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
	if (!ret)
		ret = len;

	return ret;
}

static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
					    int whence)
{
	struct cachefiles_object *object = filp->private_data;
	struct file *file = object->file;

	if (!file)
		return -ENOBUFS;

	return vfs_llseek(file, pos, whence);
}

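/*
 * CACHEFILES_IOC_READ_COMPLETE: the daemon reports that the READ request
 * identified by @arg has been fulfilled, so complete the waiting request.
 */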
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
					 unsigned long arg)
{
	struct cachefiles_object *object = filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	unsigned long id;

	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
		return -EINVAL;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	id = arg;
	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	trace_cachefiles_ondemand_cread(object, id);
	complete(&req->done);
	return 0;
}

static const struct file_operations cachefiles_ondemand_fd_fops = {
	.owner		= THIS_MODULE,
	.release	= cachefiles_ondemand_fd_release,
	.write_iter	= cachefiles_ondemand_fd_write_iter,
	.llseek		= cachefiles_ondemand_fd_llseek,
	.unlocked_ioctl	= cachefiles_ondemand_fd_ioctl,
};

/*
 * OPEN request completion (copen)
 * - command: "copen <id>,<cache_size>"
 *   <cache_size> indicates the object size if >=0, error code if negative
 */
int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;
	struct fscache_cookie *cookie;
	char *pid, *psize;
	unsigned long id;
	long size;
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	if (!*args) {
		pr_err("Empty id specified\n");
		return -EINVAL;
	}

	pid = args;
	psize = strchr(args, ',');
	if (!psize) {
		pr_err("Cache size is not specified\n");
		return -EINVAL;
	}

	*psize = 0;
	psize++;

	ret = kstrtoul(pid, 0, &id);
	if (ret)
		return ret;

	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	/* fail the OPEN request if the copen format is invalid */
	ret = kstrtol(psize, 0, &size);
	if (ret) {
		req->error = ret;
		goto out;
	}

	/* fail the OPEN request if the daemon reports an error */
	if (size < 0) {
		if (!IS_ERR_VALUE(size)) {
			req->error = -EINVAL;
			ret = -EINVAL;
		} else {
			req->error = size;
			ret = 0;
		}
		goto out;
	}

	cookie = req->object->cookie;
	cookie->object_size = size;
	if (size)
		clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	else
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	trace_cachefiles_ondemand_copen(req->object, id, size);

out:
	complete(&req->done);
	return ret;
}

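/*
 * Allocate an object ID and install an anonymous fd through which the
 * daemon can write data into (and seek around) the backing cache file.
 */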
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct cachefiles_open *load;
	struct file *file;
	u32 object_id;
	int ret, fd;

	object = cachefiles_grab_object(req->object,
			cachefiles_obj_get_ondemand_fd);
	cache = object->volume->cache;

	ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
			      XA_LIMIT(1, INT_MAX),
			      &cache->ondemand_id_next, GFP_KERNEL);
	if (ret < 0)
		goto err;

	fd = get_unused_fd_flags(O_WRONLY);
	if (fd < 0) {
		ret = fd;
		goto err_free_id;
	}

	file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
				  object, O_WRONLY);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_put_fd;
	}

	file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
	fd_install(fd, file);

	load = (void *)req->msg.data;
	load->fd = fd;
	req->msg.object_id = object_id;
	object->ondemand_id = object_id;

	cachefiles_get_unbind_pincount(cache);
	trace_cachefiles_ondemand_open(object, &req->msg, load);
	return 0;

err_put_fd:
	put_unused_fd(fd);
err_free_id:
	xa_erase(&cache->ondemand_ids, object_id);
err:
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	return ret;
}

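/*
 * Copy the next unhandled request out to the daemon. An OPEN request gets
 * an anonymous fd installed before the message is copied to userspace; a
 * CLOSE request is completed immediately since it expects no reply.
 */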
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					char __user *_buffer, size_t buflen)
{
	struct cachefiles_req *req;
	struct cachefiles_msg *msg;
	unsigned long id = 0;
	size_t n;
	int ret = 0;
	XA_STATE(xas, &cache->reqs, cache->req_id_next);

	/*
	 * Cyclically search for a request that has not yet been processed,
	 * to prevent requests from being processed repeatedly, and to make
	 * request distribution fair.
	 */
	xa_lock(&cache->reqs);
	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
	if (!req && cache->req_id_next > 0) {
		xas_set(&xas, 0);
		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
	}
	if (!req) {
		xa_unlock(&cache->reqs);
		return 0;
	}

	msg = &req->msg;
	n = msg->len;

	if (n > buflen) {
		xa_unlock(&cache->reqs);
		return -EMSGSIZE;
	}

	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
	cache->req_id_next = xas.xa_index + 1;
	xa_unlock(&cache->reqs);

	id = xas.xa_index;
	msg->msg_id = id;

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		ret = cachefiles_ondemand_get_fd(req);
		if (ret)
			goto error;
	}

	if (copy_to_user(_buffer, msg, n) != 0) {
		ret = -EFAULT;
		goto err_put_fd;
	}

	/* CLOSE request has no reply */
	if (msg->opcode == CACHEFILES_OP_CLOSE) {
		xa_erase(&cache->reqs, id);
		complete(&req->done);
	}

	return n;

err_put_fd:
	if (msg->opcode == CACHEFILES_OP_OPEN)
		close_fd(((struct cachefiles_open *)msg->data)->fd);
error:
	xa_erase(&cache->reqs, id);
	req->error = ret;
	complete(&req->done);
	return ret;
}

typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);

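/*
 * Build a request for the given opcode, let init_req() fill in the
 * opcode-specific payload, publish it in the xarray for the daemon and
 * wait for the daemon to complete it. Returns 0 immediately when
 * on-demand mode is not enabled.
 */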
308 
static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
					enum cachefiles_opcode opcode,
					size_t data_len,
					init_req_fn init_req,
					void *private)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, 0);
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return 0;

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->object = object;
	init_completion(&req->done);
	req->msg.opcode = opcode;
	req->msg.len = sizeof(struct cachefiles_msg) + data_len;

	ret = init_req(req, private);
	if (ret)
		goto out;

	do {
		/*
		 * Stop enqueuing the request when the daemon is dying. The
		 * following two operations need to be atomic as a whole.
		 *   1) check cache state, and
		 *   2) enqueue request if cache is alive.
		 * Otherwise the request may be enqueued after the xarray has
		 * been flushed, leaving an orphan request that is never
		 * completed.
		 *
		 * CPU 1			CPU 2
		 * =====			=====
		 *				test CACHEFILES_DEAD bit
		 * set CACHEFILES_DEAD bit
		 * flush requests in the xarray
		 *				enqueue the request
		 */
		xas_lock(&xas);

		if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/* coupled with the barrier in cachefiles_flush_reqs() */
		smp_mb();

		if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
			WARN_ON_ONCE(object->ondemand_id == 0);
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		xas.xa_index = 0;
		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		xas_store(&xas, req);
		xas_clear_mark(&xas, XA_FREE_MARK);
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	ret = xas_error(&xas);
	if (ret)
		goto out;

	wake_up_all(&cache->daemon_pollwq);
	wait_for_completion(&req->done);
	ret = req->error;
out:
	kfree(req);
	return ret;
}

static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	struct cachefiles_open *load = (void *)req->msg.data;
	size_t volume_key_size, cookie_key_size;
	void *volume_key, *cookie_key;

	/*
	 * Volume key is a NUL-terminated string. key[0] stores strlen() of the
	 * string, followed by the content of the string (excluding '\0').
	 */
	volume_key_size = volume->key[0] + 1;
	volume_key = volume->key + 1;

	/* Cookie key is binary data, which is netfs-specific. */
	cookie_key_size = cookie->key_len;
	cookie_key = fscache_get_key(cookie);

	if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
		pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
		return -EINVAL;
	}

	load->volume_key_size = volume_key_size;
	load->cookie_key_size = cookie_key_size;
	memcpy(load->data, volume_key, volume_key_size);
	memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);

	return 0;
}

static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
					      void *private)
{
	struct cachefiles_object *object = req->object;
	int object_id = object->ondemand_id;

	/*
	 * It's possible that the object id is still 0 if the cookie lookup
	 * phase failed before an OPEN request was ever sent. Also avoid
	 * sending a CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which
	 * means the anon_fd has already been closed.
	 */
	if (object_id <= 0)
		return -ENOENT;

	req->msg.object_id = object_id;
	trace_cachefiles_ondemand_close(object, &req->msg);
	return 0;
}

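/*
 * Read range handed from cachefiles_ondemand_read() to the READ request
 * initialiser.
 */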
struct cachefiles_read_ctx {
	loff_t off;
	size_t len;
};

static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct cachefiles_read *load = (void *)req->msg.data;
	struct cachefiles_read_ctx *read_ctx = private;
	int object_id = object->ondemand_id;

	/* Stop enqueuing requests when the daemon has closed the anon_fd. */
	if (object_id <= 0) {
		WARN_ON_ONCE(object_id == 0);
		pr_info_once("READ: anonymous fd closed prematurely.\n");
		return -EIO;
	}

	req->msg.object_id = object_id;
	load->off = read_ctx->off;
	load->len = read_ctx->len;
	trace_cachefiles_ondemand_read(object, &req->msg, load);
	return 0;
}

int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	size_t volume_key_size, cookie_key_size, data_len;

	/*
	 * CacheFiles will first check the cache file under the root cache
	 * directory. If the coherency check fails, it will fall back to
	 * creating a new tmpfile as the cache file. Reuse the previously
	 * allocated object ID if any.
	 */
	if (object->ondemand_id > 0)
		return 0;

	volume_key_size = volume->key[0] + 1;
	cookie_key_size = cookie->key_len;
	data_len = sizeof(struct cachefiles_open) +
		   volume_key_size + cookie_key_size;

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
			data_len, cachefiles_ondemand_init_open_req, NULL);
}

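/*
 * Send a CLOSE request so that the daemon can release the anonymous fd
 * associated with this object.
 */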
void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
			cachefiles_ondemand_init_close_req, NULL);
}

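/*
 * Ask the daemon to fetch [pos, pos + len) of the object's data; blocks
 * until the daemon completes the request via CACHEFILES_IOC_READ_COMPLETE.
 */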
int cachefiles_ondemand_read(struct cachefiles_object *object,
			     loff_t pos, size_t len)
{
	struct cachefiles_read_ctx read_ctx = {pos, len};

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
			sizeof(struct cachefiles_read),
			cachefiles_ondemand_init_read_req, &read_ctx);
}