/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

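	/* a page present in the cookie's stores tree is either awaiting
	 * storage (PENDING tag) or actively being written to the cache
	 * (STORING tag); an RCU lookup suffices as we only need a yes/no
	 * answer for this instant */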
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
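	/* bit 0 of cookie->flags serves as the wait address for store
	 * completion; the wake_up_bit() calls in fscache_end_page_write() and
	 * __fscache_maybe_release_page() pair with this wait */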
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();
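	/* the lock was taken whilst still inside the RCU read section, so the
	 * re-check of the STORING tag below is made under the lock and is
	 * therefore authoritative */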

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}
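	/* wake anyone waiting in __fscache_wait_on_page_write(); the wait
	 * condition rechecks the tree, so a spurious wakeup is harmless */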
	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	fscache_stat(&fscache_n_store_vmscan_busy);
	return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

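	/* object->lock stabilises object->cookie, which may be detached from
	 * the object whilst a write is in flight */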
	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
	}

	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL);
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
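	/* FSCACHE_OP_ASYNC: run the op from the thread pool;
	 * FSCACHE_OP_EXCLUSIVE: don't run it concurrently with other ops on
	 * this object, as an attribute change (e.g. a resize) shouldn't
	 * overlap in-flight I/O */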

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
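	/* FSCACHE_OP_MYTHREAD: the submitting thread drives the op itself
	 * rather than leaving it to the thread pool; FSCACHE_OP_WAITING
	 * remains set until the object grants the op service (see
	 * fscache_wait_for_retrieval_activation()) */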
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_LIST_HEAD(&op->to_do);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

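	/* make sure we see the results of the lookup - this read barrier
	 * pairs with the store side that cleared FSCACHE_COOKIE_LOOKING_UP */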
	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	_debug(">>> WT");
	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		ret = fscache_cancel_op(&op->op);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (unlikely(fscache_object_is_dead(object))) {
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
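	/* FSCACHE_OP_DEC_READ_CNT causes n_reads to be decremented again when
	 * the op completes, allowing in-flight reads to be accounted for
	 * before the object can be withdrawn */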

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
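		/* nothing has yet been stored in this object, so there can't
		 * be any data to read; just reserve a block for the netfs to
		 * fill */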
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *                the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page as it finishes being read from
 * the cache
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
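	/* no end_io function or netfs context: a bare allocation transfers no
	 * data, so there is nothing to complete */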
	if (!op)
		return -ENOMEM;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object) || !cookie) {
		spin_unlock(&object->lock);
		_leave("");
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);
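	/* the STORING tag prevents vmscan reclaiming the page through
	 * __fscache_maybe_release_page() whilst it's being written, and
	 * clearing PENDING stops the gang lookup picking it up again */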

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
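	/* on error, abort the whole object; otherwise requeue the op so that
	 * it goes round again for the next pending page */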
	if (ret < 0) {
		fscache_abort_object(object);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

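	/* preload the radix tree so that the insertion under the spinlocks
	 * below cannot fail on memory allocation */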
	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);
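	/* the stores tree now holds a reference on the page; this is dropped
	 * in fscache_end_page_write() when the write completes, or on the
	 * cancellation and failure paths */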

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	struct fscache_cookie *cookie = op->op.object->cookie;
	unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
	atomic_add(pagevec->nr, &fscache_n_marks);
#endif

	for (loop = 0; loop < pagevec->nr; loop++) {
		struct page *page = pagevec->pages[loop];

		_debug("- mark %p{%lx}", page, page->index);
		if (TestSetPageFsCache(page)) {
			static bool once_only;
			if (!once_only) {
				once_only = true;
				printk(KERN_WARNING "FS-Cache:"
				       " Cookie type %s marked page %lx"
				       " multiple times\n",
				       cookie->def->name, page->index);
			}
		}
	}

	if (cookie->def->mark_pages_cached)
		cookie->def->mark_pages_cached(cookie->netfs_data,
					       op->mapping, pagevec);
	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);