// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022, Alibaba Cloud
 * Copyright (C) 2022, Bytedance Inc. All rights reserved.
 */
#include <linux/fscache.h>
#include "internal.h"

static DEFINE_MUTEX(erofs_domain_list_lock);
static DEFINE_MUTEX(erofs_domain_cookies_lock);
static LIST_HEAD(erofs_domain_list);
static struct vfsmount *erofs_pseudo_mnt;

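/*
 * Allocate a bare netfs_io_request for reading @len bytes at @start of
 * @mapping. The request starts with a single reference held by the caller.
 */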
static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
					     loff_t start, size_t len)
{
	struct netfs_io_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start	= start;
	rreq->len	= len;
	rreq->mapping	= mapping;
	rreq->inode	= mapping->host;
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);
	return rreq;
}

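/* Drop a reference on @rreq; tear it down once the last reference is gone. */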
static void erofs_fscache_put_request(struct netfs_io_request *rreq)
{
	if (!refcount_dec_and_test(&rreq->ref))
		return;
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	kfree(rreq);
}

static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
{
	if (!refcount_dec_and_test(&subreq->ref))
		return;
	erofs_fscache_put_request(subreq->rreq);
	kfree(subreq);
}

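/* Detach and put all subrequests still attached to @rreq. */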
static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
				struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		erofs_fscache_put_subrequest(subreq);
	}
}

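/*
 * Unlock the folios covered by @rreq. A folio is marked uptodate only if it
 * is fully covered by successfully completed subrequests; since subrequests
 * are laid out back to back, a single pass with a running offset (@iopos)
 * is sufficient.
 */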
static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		unsigned int pgpos, pgend;
		bool pg_failed = false;

		if (xas_retry(&xas, folio))
			continue;

		pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
		pgend = pgpos + folio_size(folio);

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}

			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link,
					  &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed)
			folio_mark_uptodate(folio);

		folio_unlock(folio);
	}
	rcu_read_unlock();
}

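/*
 * Completion path: once the last outstanding subrequest finishes (i.e.
 * nr_outstanding drops to zero), unlock the folios and put the request.
 */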
static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
	erofs_fscache_rreq_unlock_folios(rreq);
	erofs_fscache_clear_subrequests(rreq);
	erofs_fscache_put_request(rreq);
}

static void erofs_fscache_subreq_complete(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error))
		subreq->error = transferred_or_error;

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	erofs_fscache_put_subrequest(subreq);
}

/*
 * Read data from fscache into the page cache described by @rreq, whose start
 * and length shall both be PAGE_SIZE aligned. @pstart is the physical start
 * address within the cache file.
 */
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
				struct netfs_io_request *rreq, loff_t pstart)
{
	enum netfs_io_source source;
	struct super_block *sb = rreq->mapping->host->i_sb;
	struct netfs_io_subrequest *subreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;
	loff_t start = rreq->start;
	size_t len = rreq->len;
	size_t done = 0;
	int ret;

	atomic_set(&rreq->nr_outstanding, 1);

	ret = fscache_begin_read_operation(cres, cookie);
	if (ret)
		goto out;

	while (done < len) {
		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
				 GFP_KERNEL);
		if (!subreq) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&subreq->rreq_link);
		refcount_set(&subreq->ref, 2);
		subreq->rreq = rreq;
		refcount_inc(&rreq->ref);

		subreq->start = pstart + done;
		subreq->len = len - done;
		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;

		list_add_tail(&subreq->rreq_link, &rreq->subrequests);

		source = cres->ops->prepare_read(subreq, LLONG_MAX);
		if (WARN_ON(subreq->len == 0))
			source = NETFS_INVALID_READ;
		if (source != NETFS_READ_FROM_CACHE) {
			erofs_err(sb, "failed to fscache prepare_read (source %d)",
				  source);
			ret = -EIO;
			subreq->error = ret;
			erofs_fscache_put_subrequest(subreq);
			goto out;
		}

		atomic_inc(&rreq->nr_outstanding);

		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
				start + done, subreq->len);

		ret = fscache_read(cres, subreq->start, &iter,
				   NETFS_READ_HOLE_FAIL,
				   erofs_fscache_subreq_complete, subreq);
		if (ret == -EIOCBQUEUED)
			ret = 0;
		if (ret) {
			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
			goto out;
		}

		done += subreq->len;
	}
out:
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	return ret;
}

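/*
 * .read_folio() for the anonymous inodes used to access blob metadata:
 * folio_pos() is the physical address within the data blob, so the folio
 * can be read from fscache directly.
 */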
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
	int ret;
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_dev mdev = {
		.m_deviceid = 0,
		.m_pa = folio_pos(folio),
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out;
	}

	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, mdev.m_pa);
out:
	folio_unlock(folio);
	return ret;
}

/*
 * Read into the page cache in the range described by (@pos, @len).
 *
 * On return, if the output @unlock is true, the caller is responsible for
 * unlocking the pages; otherwise the callee takes over that responsibility
 * through the netfs_io_request interface.
 *
 * The return value is the number of bytes successfully handled, or a
 * negative error code on failure. As the only exception, once the
 * netfs_io_request has been allocated, the length of the range is returned
 * even on failure, so that .readahead() can advance rac accordingly.
 */
static int erofs_fscache_data_read(struct address_space *mapping,
				   loff_t pos, size_t len, bool *unlock)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	struct iov_iter iter;
	size_t count;
	int ret;

	*unlock = true;

	map.m_la = pos;
	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret)
		return ret;

	if (map.m_flags & EROFS_MAP_META) {
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
		erofs_blk_t blknr;
		size_t offset, size;
		void *src;

		/* For tail packing layout, the offset may be non-zero. */
		offset = erofs_blkoff(map.m_pa);
		blknr = erofs_blknr(map.m_pa);
		size = map.m_llen;

		src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
		if (IS_ERR(src))
			return PTR_ERR(src);

		iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, PAGE_SIZE);
		if (copy_to_iter(src + offset, size, &iter) != size) {
			erofs_put_metabuf(&buf);
			return -EFAULT;
		}
		iov_iter_zero(PAGE_SIZE - size, &iter);
		erofs_put_metabuf(&buf);
		return PAGE_SIZE;
	}

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		count = len;
		iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
		iov_iter_zero(count, &iter);
		return count;
	}

	count = min_t(size_t, map.m_llen - (pos - map.m_la), len);
	DBG_BUGON(!count || count % PAGE_SIZE);

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	rreq = erofs_fscache_alloc_request(mapping, pos, count);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	*unlock = false;
	erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
			rreq, mdev.m_pa + (pos - map.m_la));
	return count;
}

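/* .read_folio() for regular data in fscache mode; one erofs block per folio. */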
static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
{
	bool unlock;
	int ret;

	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);

	ret = erofs_fscache_data_read(folio_mapping(folio), folio_pos(folio),
				      folio_size(folio), &unlock);
	if (unlock) {
		if (ret > 0)
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
	return ret < 0 ? ret : 0;
}

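/*
 * .readahead() in fscache mode: read chunk by chunk; folios handed to an
 * asynchronous netfs_io_request are unlocked on I/O completion, while
 * inline (metadata) copies and holes are finished synchronously here.
 */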
static void erofs_fscache_readahead(struct readahead_control *rac)
{
	struct folio *folio;
	size_t len, done = 0;
	loff_t start, pos;
	bool unlock;
	int ret, size;

	if (!readahead_count(rac))
		return;

	start = readahead_pos(rac);
	len = readahead_length(rac);

	do {
		pos = start + done;
		ret = erofs_fscache_data_read(rac->mapping, pos,
					      len - done, &unlock);
		if (ret <= 0)
			return;

		size = ret;
		while (size) {
			folio = readahead_folio(rac);
			size -= folio_size(folio);
			if (unlock) {
				folio_mark_uptodate(folio);
				folio_unlock(folio);
			}
		}
	} while ((done += ret) < len);
}

static const struct address_space_operations erofs_fscache_meta_aops = {
	.read_folio = erofs_fscache_meta_read_folio,
};

const struct address_space_operations erofs_fscache_access_aops = {
	.read_folio = erofs_fscache_read_folio,
	.readahead = erofs_fscache_readahead,
};

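/*
 * Drop a reference on @domain. On the last put, relinquish the backing
 * volume and unmount the pseudo mount once no domain is left.
 */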
static void erofs_fscache_domain_put(struct erofs_domain *domain)
{
	if (!domain)
		return;
	mutex_lock(&erofs_domain_list_lock);
	if (refcount_dec_and_test(&domain->ref)) {
		list_del(&domain->list);
		if (list_empty(&erofs_domain_list)) {
			kern_unmount(erofs_pseudo_mnt);
			erofs_pseudo_mnt = NULL;
		}
		mutex_unlock(&erofs_domain_list_lock);
		fscache_relinquish_volume(domain->volume, NULL, false);
		kfree(domain->domain_id);
		kfree(domain);
		return;
	}
	mutex_unlock(&erofs_domain_list_lock);
}

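/* Acquire the fscache volume named "erofs,<domain_id>" (or "erofs,<fsid>"). */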
static int erofs_fscache_register_volume(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	char *domain_id = sbi->domain_id;
	struct fscache_volume *volume;
	char *name;
	int ret = 0;

	name = kasprintf(GFP_KERNEL, "erofs,%s",
			 domain_id ? domain_id : sbi->fsid);
	if (!name)
		return -ENOMEM;

	volume = fscache_acquire_volume(name, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(volume)) {
		erofs_err(sb, "failed to register volume for %s", name);
		ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
		volume = NULL;
	}

	sbi->volume = volume;
	kfree(name);
	return ret;
}

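/*
 * Create a new domain and, on first use, mount the global pseudo filesystem
 * hosting the anonymous inodes of shared cookies. Called with
 * erofs_domain_list_lock held.
 */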
static int erofs_fscache_init_domain(struct super_block *sb)
{
	int err;
	struct erofs_domain *domain;
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	domain = kzalloc(sizeof(struct erofs_domain), GFP_KERNEL);
	if (!domain)
		return -ENOMEM;

	domain->domain_id = kstrdup(sbi->domain_id, GFP_KERNEL);
	if (!domain->domain_id) {
		kfree(domain);
		return -ENOMEM;
	}

	err = erofs_fscache_register_volume(sb);
	if (err)
		goto out;

	if (!erofs_pseudo_mnt) {
		struct vfsmount *mnt = kern_mount(&erofs_fs_type);

		/* don't leave an ERR_PTR in the global on failure */
		if (IS_ERR(mnt)) {
			err = PTR_ERR(mnt);
			goto out;
		}
		erofs_pseudo_mnt = mnt;
	}

	domain->volume = sbi->volume;
	refcount_set(&domain->ref, 1);
	list_add(&domain->list, &erofs_domain_list);
	sbi->domain = domain;
	return 0;
out:
	kfree(domain->domain_id);
	kfree(domain);
	return err;
}

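/* Join an existing domain matching @domain_id, or initialize a new one. */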
static int erofs_fscache_register_domain(struct super_block *sb)
{
	int err;
	struct erofs_domain *domain;
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_lock(&erofs_domain_list_lock);
	list_for_each_entry(domain, &erofs_domain_list, list) {
		if (!strcmp(domain->domain_id, sbi->domain_id)) {
			sbi->domain = domain;
			sbi->volume = domain->volume;
			refcount_inc(&domain->ref);
			mutex_unlock(&erofs_domain_list_lock);
			return 0;
		}
	}
	err = erofs_fscache_init_domain(sb);
	mutex_unlock(&erofs_domain_list_lock);
	return err;
}

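/*
 * Acquire an fscache cookie for the blob @name. With
 * EROFS_REG_COOKIE_NEED_INODE, also set up an anonymous address space
 * (backed by erofs_fscache_meta_aops) for reading the blob's metadata.
 */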
static
struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
						   char *name,
						   unsigned int flags)
{
	struct fscache_volume *volume = EROFS_SB(sb)->volume;
	struct erofs_fscache *ctx;
	struct fscache_cookie *cookie;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
					name, strlen(name), NULL, 0, 0);
	if (!cookie) {
		erofs_err(sb, "failed to get cookie for %s", name);
		ret = -EINVAL;
		goto err;
	}

	fscache_use_cookie(cookie, false);
	ctx->cookie = cookie;

	if (flags & EROFS_REG_COOKIE_NEED_INODE) {
		struct inode *const inode = new_inode(sb);

		if (!inode) {
			erofs_err(sb, "failed to get anon inode for %s", name);
			ret = -ENOMEM;
			goto err_cookie;
		}

		set_nlink(inode, 1);
		inode->i_size = OFFSET_MAX;
		inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

		ctx->inode = inode;
	}

	return ctx;

err_cookie:
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
err:
	kfree(ctx);
	return ERR_PTR(ret);
}

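/* Tear down a cookie context: unuse and relinquish the cookie, then free @ctx. */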
static void erofs_fscache_relinquish_cookie(struct erofs_fscache *ctx)
{
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	iput(ctx->inode);
	kfree(ctx->name);
	kfree(ctx);
}

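/*
 * Create a domain-wide cookie; an anonymous inode on the pseudo mount pins
 * the context and makes it discoverable for sharing. Called with
 * erofs_domain_cookies_lock held.
 */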
static
struct erofs_fscache *erofs_fscache_domain_init_cookie(struct super_block *sb,
						       char *name,
						       unsigned int flags)
{
	int err;
	struct inode *inode;
	struct erofs_fscache *ctx;
	struct erofs_domain *domain = EROFS_SB(sb)->domain;

	ctx = erofs_fscache_acquire_cookie(sb, name, flags);
	if (IS_ERR(ctx))
		return ctx;

	ctx->name = kstrdup(name, GFP_KERNEL);
	if (!ctx->name) {
		err = -ENOMEM;
		goto out;
	}

	inode = new_inode(erofs_pseudo_mnt->mnt_sb);
	if (!inode) {
		err = -ENOMEM;
		goto out;
	}

	ctx->domain = domain;
	ctx->anon_inode = inode;
	inode->i_private = ctx;
	refcount_inc(&domain->ref);
	return ctx;
out:
	erofs_fscache_relinquish_cookie(ctx);
	return ERR_PTR(err);
}

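/*
 * Look up @name among the anonymous inodes on the pseudo mount and share the
 * existing context if one is found, unless EROFS_REG_COOKIE_NEED_NOEXIST
 * demands uniqueness; otherwise create a new domain-wide cookie.
 */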
static
struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
						   char *name,
						   unsigned int flags)
{
	struct inode *inode;
	struct erofs_fscache *ctx;
	struct erofs_domain *domain = EROFS_SB(sb)->domain;
	struct super_block *psb = erofs_pseudo_mnt->mnt_sb;

	mutex_lock(&erofs_domain_cookies_lock);
	spin_lock(&psb->s_inode_list_lock);
	list_for_each_entry(inode, &psb->s_inodes, i_sb_list) {
		ctx = inode->i_private;
		if (!ctx || ctx->domain != domain || strcmp(ctx->name, name))
			continue;
		if (!(flags & EROFS_REG_COOKIE_NEED_NOEXIST)) {
			igrab(inode);
		} else {
			erofs_err(sb, "%s already exists in domain %s", name,
				  domain->domain_id);
			ctx = ERR_PTR(-EEXIST);
		}
		spin_unlock(&psb->s_inode_list_lock);
		mutex_unlock(&erofs_domain_cookies_lock);
		return ctx;
	}
	spin_unlock(&psb->s_inode_list_lock);
	ctx = erofs_fscache_domain_init_cookie(sb, name, flags);
	mutex_unlock(&erofs_domain_cookies_lock);
	return ctx;
}

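/*
 * Register a cookie for the data blob @name, dispatching to the shared
 * domain path when the image was mounted with a domain_id. A sketch of a
 * typical call site for an extra device blob (names are illustrative):
 *
 *	fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
 *	if (IS_ERR(fscache))
 *		return PTR_ERR(fscache);
 */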
struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
						    char *name,
						    unsigned int flags)
{
	if (EROFS_SB(sb)->domain_id)
		return erofs_domain_register_cookie(sb, name, flags);
	return erofs_fscache_acquire_cookie(sb, name, flags);
}

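/*
 * Unregister a cookie context. For domain cookies, only the anonymous inode
 * reference is dropped here; the last holder also tears down the context
 * and puts its domain reference.
 */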
void erofs_fscache_unregister_cookie(struct erofs_fscache *ctx)
{
	bool drop;
	struct erofs_domain *domain;

	if (!ctx)
		return;
	domain = ctx->domain;
	if (domain) {
		mutex_lock(&erofs_domain_cookies_lock);
		drop = atomic_read(&ctx->anon_inode->i_count) == 1;
		iput(ctx->anon_inode);
		mutex_unlock(&erofs_domain_cookies_lock);
		if (!drop)
			return;
	}

	erofs_fscache_relinquish_cookie(ctx);
	erofs_fscache_domain_put(domain);
}

int erofs_fscache_register_fs(struct super_block *sb)
{
	int ret;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fscache *fscache;
	unsigned int flags;

	if (sbi->domain_id)
		ret = erofs_fscache_register_domain(sb);
	else
		ret = erofs_fscache_register_volume(sb);
	if (ret)
		return ret;

	/*
	 * When a shared domain is enabled, use NEED_NOEXIST to guarantee
	 * that the primary data blob (aka fsid) is unique in the shared
	 * domain.
	 *
	 * For the non-shared-domain case, fscache_acquire_volume() invoked
	 * by erofs_fscache_register_volume() has already guaranteed the
	 * uniqueness of the primary data blob.
	 *
	 * An acquired domain/volume will be relinquished in kill_sb() on
	 * error.
	 */
	flags = EROFS_REG_COOKIE_NEED_INODE;
	if (sbi->domain_id)
		flags |= EROFS_REG_COOKIE_NEED_NOEXIST;
	fscache = erofs_fscache_register_cookie(sb, sbi->fsid, flags);
	if (IS_ERR(fscache))
		return PTR_ERR(fscache);

	sbi->s_fscache = fscache;
	return 0;
}

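/* Undo erofs_fscache_register_fs(): drop the cookie, then the domain/volume. */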
void erofs_fscache_unregister_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	erofs_fscache_unregister_cookie(sbi->s_fscache);

	if (sbi->domain)
		erofs_fscache_domain_put(sbi->domain);
	else
		fscache_relinquish_volume(sbi->volume, NULL, false);

	sbi->s_fscache = NULL;
	sbi->volume = NULL;
	sbi->domain = NULL;
}