/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

/* Ordinary requests have even IDs, while interrupt IDs are odd */
#define FUSE_INT_REQ_BIT (1ULL << 0)
#define FUSE_REQ_ID_STEP (1ULL << 1)
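
/*
 * Example: a request with unique ID 6 has its interrupt sent with
 * unique ID 7 (6 | FUSE_INT_REQ_BIT); fuse_req_hash() below masks the
 * bit off again, so both IDs map to the same processing-list bucket.
 */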

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return READ_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	__set_bit(FR_PENDING, &req->flags);
	req->fm = fm;
}

static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
	if (req)
		fuse_request_init(fm, req);

	return req;
}

static void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static void fuse_drop_waiting(struct fuse_conn *fc)
{
	/*
	 * Lockless check of fc->connected is okay, because atomic_dec_and_test()
	 * provides a memory barrier matched with the one in fuse_wait_aborted()
	 * to ensure no wake-up is missed.
	 */
	if (atomic_dec_and_test(&fc->num_waiting) &&
	    !READ_ONCE(fc->connected)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}

static void fuse_put_request(struct fuse_req *req);

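/*
 * Allocate a request for @fm, charging it against the connection's
 * num_waiting count.  Waits until the connection is initialized (and,
 * for background requests, no longer blocked).  Returns an ERR_PTR on
 * signal, broken connection, or failed allocation.
 */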
static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(fm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
		     req->in.h.gid == ((gid_t)-1))) {
		fuse_put_request(req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

 out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}

static void fuse_put_request(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->bg_lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->bg_lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		fuse_request_free(req);
	}
}

unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
EXPORT_SYMBOL_GPL(fuse_len_args);

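/*
 * Hand out the next unique request ID.  IDs advance by
 * FUSE_REQ_ID_STEP (2), so bit 0 stays clear and can be set to tag the
 * corresponding interrupt request.  All callers in this file hold
 * fiq->lock.
 */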
u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	fiq->reqctr += FUSE_REQ_ID_STEP;
	return fiq->reqctr;
}
EXPORT_SYMBOL_GPL(fuse_get_unique);

static unsigned int fuse_req_hash(u64 unique)
{
	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}

/*
 * A new request is available, wake fiq->waitq
 */
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	wake_up(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	spin_unlock(&fiq->lock);
}

const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);

static void queue_request_and_unlock(struct fuse_iqueue *fiq,
				     struct fuse_req *req)
__releases(fiq->lock)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		fuse_len_args(req->args->in_numargs,
			      (struct fuse_arg *) req->args->in_args);
	list_add_tail(&req->list, &fiq->pending);
	fiq->ops->wake_pending_and_unlock(fiq);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		fiq->ops->wake_forget_and_unlock(fiq);
	} else {
		kfree(forget);
		spin_unlock(&fiq->lock);
	}
}

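/*
 * Move queued background requests onto the input queue until the
 * active_background limit is reached.  Called with fc->bg_lock held.
 */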
static void flush_bg_queue(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request_and_unlock(fiq, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 */
void fuse_request_end(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	/*
	 * test_and_set_bit() implies smp_mb() between bit
	 * changing and below FR_INTERRUPTED check.  Pairs with
	 * smp_mb() from queue_interrupt().
	 */
	if (test_bit(FR_INTERRUPTED, &req->flags)) {
		spin_lock(&fiq->lock);
		list_del_init(&req->intr_entry);
		spin_unlock(&fiq->lock);
	}
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->bg_lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any.  It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);
	} else {
		/* Wake up waiter sleeping in request_wait_answer() */
		wake_up(&req->waitq);
	}

	if (test_bit(FR_ASYNC, &req->flags))
		req->args->end(fm, req->args, req->out.h.error);
put_request:
	fuse_put_request(req);
}
EXPORT_SYMBOL_GPL(fuse_request_end);

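/*
 * Queue an INTERRUPT for @req on the input queue.  Returns -EINVAL if
 * the request is not marked FR_INTERRUPTED; does nothing if the
 * interrupt is already queued or the request has meanwhile finished.
 */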
static int queue_interrupt(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	spin_lock(&fiq->lock);
	/* Check that the request is still marked for interruption */
	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		/*
		 * Pairs with smp_mb() implied by test_and_set_bit()
		 * from fuse_request_end().
		 */
		smp_mb();
		if (test_bit(FR_FINISHED, &req->flags)) {
			list_del_init(&req->intr_entry);
			spin_unlock(&fiq->lock);
			return 0;
		}
		fiq->ops->wake_interrupt_and_unlock(fiq);
	} else {
		spin_unlock(&fiq->lock);
	}
	return 0;
}

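/*
 * Wait for the answer to @req in up to three phases: an interruptible
 * wait (any signal queues an INTERRUPT to userspace), a killable wait
 * (a fatal signal aborts the request with -EINTR if it is still
 * pending), and finally an uninterruptible wait for requests that are
 * already in userspace or were forced.
 */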
static void request_wait_answer(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		/*
		 * Acquire extra reference, since request is still needed
		 * after fuse_request_end().
		 */
		__fuse_get_request(req);
		queue_request_and_unlock(fiq, req);

		request_wait_answer(req);
		/* Pairs with smp_wmb() in fuse_request_end() */
		smp_rmb();
	}
}

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->opcode) {
		case FUSE_CREATE:
			args->in_args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

static void fuse_force_creds(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
{
	req->in.h.opcode = args->opcode;
	req->in.h.nodeid = args->nodeid;
	req->args = args;
	if (args->is_ext)
		req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
	if (args->end)
		__set_bit(FR_ASYNC, &req->flags);
}

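/*
 * Send a request and wait for the reply.  Returns the reply's error
 * code, or, for requests with a variable-size last argument
 * (out_argvar), the number of bytes the daemon actually returned in
 * that argument.
 */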
ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	ssize_t ret;

	if (args->force) {
		atomic_inc(&fc->num_waiting);
		req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);

		if (!args->nocreds)
			fuse_force_creds(req);

		__set_bit(FR_WAITING, &req->flags);
		__set_bit(FR_FORCE, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, false);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);
	fuse_args_to_req(req, args);

	if (!args->noreply)
		__set_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(req);
	ret = req->out.h.error;
	if (!ret && args->out_argvar) {
		BUG_ON(args->out_numargs == 0);
		ret = args->out_args[args->out_numargs - 1].size;
	}
	fuse_put_request(req);

	return ret;
}

static bool fuse_request_queue_background(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	bool queued = false;

	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	spin_lock(&fc->bg_lock);
	if (likely(fc->connected)) {
		fc->num_background++;
		if (fc->num_background == fc->max_background)
			fc->blocked = 1;
		list_add_tail(&req->list, &fc->bg_queue);
		flush_bg_queue(fc);
		queued = true;
	}
	spin_unlock(&fc->bg_lock);

	return queued;
}

int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
			   gfp_t gfp_flags)
{
	struct fuse_req *req;

	if (args->force) {
		WARN_ON(!args->nocreds);
		req = fuse_request_alloc(fm, gfp_flags);
		if (!req)
			return -ENOMEM;
		__set_bit(FR_BACKGROUND, &req->flags);
	} else {
		WARN_ON(args->nocreds);
		req = fuse_get_req(fm, true);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	fuse_args_to_req(req, args);

	if (!fuse_request_queue_background(req)) {
		fuse_put_request(req);
		return -ENOTCONN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_simple_background);

static int fuse_simple_notify_reply(struct fuse_mount *fm,
				    struct fuse_args *args, u64 unique)
{
	struct fuse_req *req;
	struct fuse_iqueue *fiq = &fm->fc->iq;
	int err = 0;

	req = fuse_get_req(fm, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;

	fuse_args_to_req(req, args);

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		queue_request_and_unlock(fiq, req);
	} else {
		err = -ENODEV;
		spin_unlock(&fiq->lock);
		fuse_put_request(req);
	}

	return err;
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

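/*
 * State for copying request data to/from userspace.  The source or
 * destination is either a user iov_iter or a sequence of pipe buffers
 * (for splice); pg/offset/len track the page currently being copied.
 */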
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs >= cs->pipe->max_usage)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_local_page(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_local(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_folio(struct folio *folio)
{
	if (folio_mapped(folio) ||
	    folio->mapping != NULL ||
	    (folio->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_workingset |
	       1 << PG_reclaim |
	       1 << PG_waiters |
	       LRU_GEN_MASK | LRU_REFS_MASK))) {
		dump_page(&folio->page, "fuse: trying to steal weird page");
		return 1;
	}
	return 0;
}

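/*
 * Try to steal the page backing the current pipe buffer and splice it
 * into the page cache in place of *pagep, avoiding a data copy.
 * Returns 0 on success, 1 if the caller should fall back to copying,
 * or a negative error.
 */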
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct folio *oldfolio = page_folio(*pagep);
	struct folio *newfolio;
	struct pipe_buffer *buf = cs->pipebufs;

	folio_get(oldfolio);
	err = unlock_request(cs->req);
	if (err)
		goto out_put_old;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		goto out_put_old;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (!pipe_buf_try_steal(cs->pipe, buf))
		goto out_fallback;

	newfolio = page_folio(buf->page);

	if (!folio_test_uptodate(newfolio))
		folio_mark_uptodate(newfolio);

	folio_clear_mappedtodisk(newfolio);

	if (fuse_check_folio(newfolio) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(folio_mapped(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_has_private(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_test_dirty(oldfolio) ||
		    folio_test_writeback(oldfolio)))
		goto out_fallback_unlock;
	if (WARN_ON(folio_test_mlocked(oldfolio)))
		goto out_fallback_unlock;

	replace_page_cache_folio(oldfolio, newfolio);

	folio_get(newfolio);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		folio_add_lru(newfolio);

	/*
	 * Release while we have extra ref on stolen page.  Otherwise
	 * anon_pipe_buf_release() might think the page can be reused.
	 */
	pipe_buf_release(cs->pipe, buf);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = &newfolio->page;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		folio_unlock(newfolio);
		folio_put(newfolio);
		goto out_put_old;
	}

	folio_unlock(oldfolio);
	/* Drop ref for ap->pages[] array */
	folio_put(oldfolio);
	cs->len = 0;

	err = 0;
out_put_old:
	/* Drop ref obtained in this function */
	folio_put(oldfolio);
	return err;

out_fallback_unlock:
	folio_unlock(newfolio);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (!err)
		err = 1;

	goto out_put_old;
}

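/*
 * Reference a request page directly into the pipe for a splice read,
 * so the daemon receives it without the data being copied.
 */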
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs >= cs->pipe->max_usage)
		return -EIO;

	get_page(page);
	err = unlock_request(cs->req);
	if (err) {
		put_page(page);
		return err;
	}

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			/*
			 * Can't control lifetime of pipe buffers, so always
			 * copy user pages.
			 */
			if (cs->req->args->user_pages) {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			} else {
				return fuse_ref_page(cs, page, offset, count);
			}
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_local_page(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_local(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);

	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned int offset = ap->descs[i].offset;
		unsigned int count = min(nbytes, ap->descs[i].length);

		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
					     unsigned int max,
					     unsigned int *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
EXPORT_SYMBOL(fuse_dequeue_forget);

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->lock)
{
	int err;
	struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = fuse_dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

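/*
 * Send queued forgets as a single FORGET or a BATCH_FORGET, depending
 * on how many are pending and whether the protocol (minor >= 16)
 * supports batching.
 */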
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * fuse_request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_args *args;
	unsigned reqsize;
	unsigned int hash;

	/*
	 * Require sane minimum read buffer - that has capacity for fixed part
	 * of any request header + negotiated max_write room for data.
	 *
	 * Historically libfuse reserves 4K for fixed header room, but e.g.
	 * GlusterFS reserves only 80 bytes
	 *
	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
	 *
	 * which is the absolute minimum any sane filesystem should be using
	 * for header room.
	 */
	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
			   sizeof(struct fuse_in_header) +
			   sizeof(struct fuse_write_in) +
			   fc->max_write))
		return -EINVAL;

 restart:
	for (;;) {
		spin_lock(&fiq->lock);
		if (!fiq->connected || request_pending(fiq))
			break;
		spin_unlock(&fiq->lock);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = wait_event_interruptible_exclusive(fiq->waitq,
				!fiq->connected || request_pending(fiq));
		if (err)
			return err;
	}

	if (!fiq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto err_unlock;
	}

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->lock);

	args = req->args;
	reqsize = req->in.h.len;

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (args->opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		fuse_request_end(req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	/*
	 * Must not put request on fpq->io queue after having been shut down by
	 * fuse_abort_conn()
	 */
	if (!fpq->connected) {
		req->out.h.error = err = -ECONNABORTED;
		goto out_end;
	}
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
	if (!err)
		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
				     (struct fuse_arg *) args->in_args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = fc->aborted ? -ECONNABORTED : -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	hash = fuse_req_hash(req->in.h.unique);
	list_move_tail(&req->list, &fpq->processing[hash]);
	__fuse_get_request(req);
	set_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(req);
	fuse_put_request(req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	fuse_request_end(req);
	return err;

err_unlock:
	spin_unlock(&fiq->lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!user_backed_iter(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
			      GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kvfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = fuse_reverse_inval_inode(fc, outarg.ino,
				       outarg.off, outarg.len);
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_attr(inode, file_size, outarg.size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

struct fuse_retrieve_args {
	struct fuse_args_pages ap;
	struct fuse_notify_retrieve_in inarg;
};

static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
			      int error)
{
	struct fuse_retrieve_args *ra =
		container_of(args, typeof(*ra), ap.args);

	release_pages(ra->ap.pages, ra->ap.num_pages);
	kfree(ra);
}

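/*
 * Assemble a NOTIFY_REPLY carrying the requested range of the inode's
 * page cache back to the daemon.  Only pages already present in the
 * cache are included; the scan stops at the first hole.
 */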
static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	unsigned int num_pages;
	struct fuse_conn *fc = fm->fc;
	struct fuse_retrieve_args *ra;
	size_t args_size = sizeof(*ra);
	struct fuse_args_pages *ap;
	struct fuse_args *args;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = min(outarg->size, fc->max_write);
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, fc->max_pages);

	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));

	ra = kzalloc(args_size, GFP_KERNEL);
	if (!ra)
		return -ENOMEM;

	ap = &ra->ap;
	ap->pages = (void *) (ra + 1);
	ap->descs = (void *) (ap->pages + num_pages);

	args = &ap->args;
	args->nodeid = outarg->nodeid;
	args->opcode = FUSE_NOTIFY_REPLY;
	args->in_numargs = 2;
	args->in_pages = true;
	args->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && ap->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].offset = offset;
		ap->descs[ap->num_pages].length = this_num;
		ap->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	ra->inarg.offset = outarg->offset;
	ra->inarg.size = total_len;
	args->in_args[0].size = sizeof(ra->inarg);
	args->in_args[0].value = &ra->inarg;
	args->in_args[1].size = total_len;

	err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fm, args, err);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct fuse_mount *fm;
	struct inode *inode;
	u64 nodeid;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	nodeid = outarg.nodeid;

	inode = fuse_ilookup(fc, nodeid, &fm);
	if (inode) {
		err = fuse_retrieve(fm, inode, &outarg);
		iput(inode);
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	unsigned int hash = fuse_req_hash(unique);
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing[hash], list) {
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

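/*
 * Validate the reply size against the declared out arguments and copy
 * them in.  A reply shorter than declared is only allowed when
 * out_argvar is set, in which case the last argument is shrunk to fit.
 */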
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	reqsize += fuse_len_args(args->out_numargs, args->out_args);

	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
		unsigned diffsize = reqsize - nbytes;

		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
			      args->out_args, args->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling fuse_request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	err = -EINVAL;
	if (nbytes < sizeof(struct fuse_out_header))
		goto out;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto copy_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto copy_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		goto out;
	}

	err = -EINVAL;
	if (oh.error <= -512 || oh.error > 0)
		goto copy_finish;

	spin_lock(&fpq->lock);
	req = NULL;
	if (fpq->connected)
		req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);

	err = -ENOENT;
	if (!req) {
		spin_unlock(&fpq->lock);
		goto copy_finish;
	}

	/* Is it an interrupt reply ID? */
	if (oh.unique & FUSE_INT_REQ_BIT) {
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = 0;
		if (nbytes != sizeof(struct fuse_out_header))
			err = -EINVAL;
		else if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			err = queue_interrupt(req);

		fuse_put_request(req);

		goto copy_finish;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->args->page_replace)
		cs->move_pages = 0;

	if (oh.error)
		err = nbytes != sizeof(oh) ? -EINVAL : 0;
	else
		err = copy_out_args(cs, req->args, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
out:
	return err ? err : nbytes;

copy_finish:
	fuse_copy_finish(cs);
	goto out;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!user_backed_iter(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

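/*
 * Splice a reply from a pipe into the device: detach or take a
 * reference on the pipe buffers covering @len bytes, then feed them to
 * fuse_dev_do_write().  With SPLICE_F_MOVE, page-sized buffers may be
 * stolen outright into the page cache (see fuse_try_move_page()).
 */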
fuse_dev_splice_write(struct pipe_inode_info * pipe,struct file * out,loff_t * ppos,size_t len,unsigned int flags)1963 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1964 struct file *out, loff_t *ppos,
1965 size_t len, unsigned int flags)
1966 {
1967 unsigned int head, tail, mask, count;
1968 unsigned nbuf;
1969 unsigned idx;
1970 struct pipe_buffer *bufs;
1971 struct fuse_copy_state cs;
1972 struct fuse_dev *fud;
1973 size_t rem;
1974 ssize_t ret;
1975
1976 fud = fuse_get_dev(out);
1977 if (!fud)
1978 return -EPERM;
1979
1980 pipe_lock(pipe);
1981
1982 head = pipe->head;
1983 tail = pipe->tail;
1984 mask = pipe->ring_size - 1;
1985 count = head - tail;
1986
1987 bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
1988 if (!bufs) {
1989 pipe_unlock(pipe);
1990 return -ENOMEM;
1991 }
1992
1993 nbuf = 0;
1994 rem = 0;
1995 for (idx = tail; idx != head && rem < len; idx++)
1996 rem += pipe->bufs[idx & mask].len;
1997
1998 ret = -EINVAL;
1999 if (rem < len)
2000 goto out_free;
2001
2002 rem = len;
2003 while (rem) {
2004 struct pipe_buffer *ibuf;
2005 struct pipe_buffer *obuf;
2006
2007 if (WARN_ON(nbuf >= count || tail == head))
2008 goto out_free;
2009
2010 ibuf = &pipe->bufs[tail & mask];
2011 obuf = &bufs[nbuf];
2012
2013 if (rem >= ibuf->len) {
2014 *obuf = *ibuf;
2015 ibuf->ops = NULL;
2016 tail++;
2017 pipe->tail = tail;
2018 } else {
2019 if (!pipe_buf_get(pipe, ibuf))
2020 goto out_free;
2021
2022 *obuf = *ibuf;
2023 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2024 obuf->len = rem;
2025 ibuf->offset += obuf->len;
2026 ibuf->len -= obuf->len;
2027 }
2028 nbuf++;
2029 rem -= obuf->len;
2030 }
2031 pipe_unlock(pipe);
2032
2033 fuse_copy_init(&cs, 0, NULL);
2034 cs.pipebufs = bufs;
2035 cs.nr_segs = nbuf;
2036 cs.pipe = pipe;
2037
2038 if (flags & SPLICE_F_MOVE)
2039 cs.move_pages = 1;
2040
2041 ret = fuse_dev_do_write(fud, &cs, len);
2042
2043 pipe_lock(pipe);
2044 out_free:
2045 for (idx = 0; idx < nbuf; idx++) {
2046 struct pipe_buffer *buf = &bufs[idx];
2047
2048 if (buf->ops)
2049 pipe_buf_release(pipe, buf);
2050 }
2051 pipe_unlock(pipe);
2052
2053 kvfree(bufs);
2054 return ret;
2055 }
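
/*
 * Userspace counterpart to the splice path above (a sketch; 'pipefd',
 * 'fusefd' and 'reply_len' are hypothetical daemon-side names, not part of
 * this file):
 *
 *	ssize_t n = splice(pipefd[0], NULL, fusefd, NULL,
 *			   reply_len, SPLICE_F_MOVE);
 *
 * With SPLICE_F_MOVE the write side may steal whole pipe pages rather than
 * copy them (cs.move_pages above); without the flag the data is copied.
 */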

static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return EPOLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->lock);
	if (!fiq->connected)
		mask = EPOLLERR;
	else if (request_pending(fiq))
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock(&fiq->lock);

	return mask;
}
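
/*
 * Minimal daemon event loop against the poll semantics above (a sketch;
 * 'fusefd', 'buf' and 'bufsize' are hypothetical):
 *
 *	struct pollfd pfd = { .fd = fusefd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLERR)
 *			break;				// connection gone
 *		if (pfd.revents & POLLIN)
 *			read(fusefd, buf, bufsize);	// next request
 *	}
 *
 * A healthy connection always reports writability; POLLIN signals a
 * pending request and POLLERR a dead connection.
 */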

/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		fuse_request_end(req);
	}
}

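/*
 * Wake up everything waiting in fuse_file_poll(), so pollers re-evaluate
 * and notice that the connection is going away.
 */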
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem. The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately. Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests. It is possible that some request will finish before we can. This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);
		unsigned int i;

		/* Background queuing checks fc->connected under bg_lock */
		spin_lock(&fc->bg_lock);
		fc->connected = 0;
		spin_unlock(&fc->bg_lock);

		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
				list_splice_tail_init(&fpq->processing[i],
						      &to_end);
			spin_unlock(&fpq->lock);
		}
		spin_lock(&fc->bg_lock);
		fc->blocked = 0;
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);

		spin_lock(&fiq->lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		while (forget_pending(fiq))
			kfree(fuse_dequeue_forget(fiq, 1, NULL));
		wake_up_all(&fiq->waitq);
		spin_unlock(&fiq->lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		end_requests(&to_end);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
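
/*
 * The abort above can be driven from userspace through the fusectl
 * filesystem, e.g. (connection number 'NNN' is per-mount):
 *
 *	echo 1 > /sys/fs/fuse/connections/NNN/abort
 */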

void fuse_wait_aborted(struct fuse_conn *fc)
{
	/* matches implicit memory barrier in fuse_drop_waiting() */
	smp_mb();
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}

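/*
 * Release a /dev/fuse file: finish off any requests still sitting in this
 * device's processing queues, and if this was the last open device for the
 * connection, abort the connection itself.
 */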
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);
		unsigned int i;

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_init(&fpq->processing[i], &to_end);
		spin_unlock(&fpq->lock);

		end_requests(&to_end);

		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

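/*
 * Attach a fresh fuse_dev to an already established connection, giving the
 * daemon an additional /dev/fuse fd (and with it an additional processing
 * queue) on the same filesystem.
 */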
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc_install(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int res;
	int oldfd;
	struct fuse_dev *fud = NULL;
	struct fd f;

	switch (cmd) {
	case FUSE_DEV_IOC_CLONE:
		if (get_user(oldfd, (__u32 __user *)arg))
			return -EFAULT;

		f = fdget(oldfd);
		if (!f.file)
			return -EINVAL;

		/*
		 * Check against file->f_op because CUSE
		 * uses the same ioctl handler.
		 */
		if (f.file->f_op == file->f_op)
			fud = fuse_get_dev(f.file);

		res = -EINVAL;
		if (fud) {
			mutex_lock(&fuse_mutex);
			res = fuse_device_clone(fud->fc, file);
			mutex_unlock(&fuse_mutex);
		}
		fdput(f);
		break;
	default:
		res = -ENOTTY;
		break;
	}
	return res;
}
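
/*
 * Userspace-side sketch of the clone ioctl above ('session_fd' is a
 * hypothetical fd already backing a mounted FUSE filesystem):
 *
 *	int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);
 *	uint32_t oldfd = session_fd;
 *
 *	if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &oldfd) == 0)
 *		;	// clone_fd now serves the same connection
 *
 * Multithreaded daemons can use one cloned fd per worker so that each
 * worker gets its own processing queue.
 */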

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl	= fuse_dev_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops  = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}