// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
 * Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "glob.h"
#include "vfs_cache.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
#include "smb_common.h"

#define S_DEL_PENDING			1
#define S_DEL_ON_CLS			2
#define S_DEL_ON_CLS_STREAM		8

static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);

static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
static struct kmem_cache *filp_cache;

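/*
 * Cap how many file handles ksmbd keeps open at once.  The limit is
 * clamped to the system-wide get_max_files() value; fd_limit_depleted()
 * charges one slot per volatile open and fd_limit_close() returns it.
 */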
void ksmbd_set_fd_limit(unsigned long limit)
{
	limit = min(limit, get_max_files());
	atomic_long_set(&fd_limit, limit);
}

static bool fd_limit_depleted(void)
{
	long v = atomic_long_dec_return(&fd_limit);

	if (v >= 0)
		return false;
	atomic_long_inc(&fd_limit);
	return true;
}

static void fd_limit_close(void)
{
	atomic_long_inc(&fd_limit);
}

/*
 * INODE hash
 */

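/*
 * Map a (super_block, inode number) pair to a bucket in inode_hashtable.
 * Insertions and lookups are serialized by the callers with
 * inode_hash_lock.
 */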
static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
		L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
	return tmp & inode_hash_mask;
}

static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
{
	struct hlist_head *head = inode_hashtable +
		inode_hash(inode->i_sb, inode->i_ino);
	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;

	hlist_for_each_entry(ci, head, m_hash) {
		if (ci->m_inode == inode) {
			if (atomic_inc_not_zero(&ci->m_count))
				ret_ci = ci;
			break;
		}
	}
	return ret_ci;
}

static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
	return __ksmbd_inode_lookup(file_inode(fp->filp));
}

static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
{
	struct ksmbd_inode *ci;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(inode);
	read_unlock(&inode_hash_lock);
	return ci;
}

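/*
 * Report whether ksmbd currently tracks @inode and, if so, whether a
 * delete is pending on it.  Returns one of the KSMBD_INODE_STATUS_*
 * values; the temporary m_count reference taken by the lookup is
 * dropped again before returning.
 */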
int ksmbd_query_inode_status(struct inode *inode)
{
	struct ksmbd_inode *ci;
	int ret = KSMBD_INODE_STATUS_UNKNOWN;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(inode);
	if (ci) {
		ret = KSMBD_INODE_STATUS_OK;
		if (ci->m_flags & S_DEL_PENDING)
			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
		atomic_dec(&ci->m_count);
	}
	read_unlock(&inode_hash_lock);
	return ret;
}

bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
	return (fp->f_ci->m_flags & S_DEL_PENDING);
}

void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
	fp->f_ci->m_flags |= S_DEL_PENDING;
}

void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
	fp->f_ci->m_flags &= ~S_DEL_PENDING;
}

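/*
 * Mark the backing inode for delete-on-close.  A stream (xattr-backed)
 * open only schedules removal of its stream xattr; a regular open
 * schedules unlink of the file itself when the last ksmbd reference
 * goes away.
 */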
void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
				  int file_info)
{
	if (ksmbd_stream_fd(fp)) {
		fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
		return;
	}

	fp->f_ci->m_flags |= S_DEL_ON_CLS;
}

static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
	struct hlist_head *b = inode_hashtable +
		inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);

	hlist_add_head(&ci->m_hash, b);
}

static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
	write_lock(&inode_hash_lock);
	hlist_del_init(&ci->m_hash);
	write_unlock(&inode_hash_lock);
}

static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
	ci->m_inode = file_inode(fp->filp);
	atomic_set(&ci->m_count, 1);
	atomic_set(&ci->op_count, 0);
	atomic_set(&ci->sop_count, 0);
	ci->m_flags = 0;
	ci->m_fattr = 0;
	INIT_LIST_HEAD(&ci->m_fp_list);
	INIT_LIST_HEAD(&ci->m_op_list);
	rwlock_init(&ci->m_lock);
	return 0;
}

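/*
 * Find the ksmbd_inode for @fp's VFS inode, creating and hashing a new
 * one if none exists.  The lookup is repeated under the write lock so a
 * concurrent creator wins the race and the duplicate allocation is freed.
 */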
static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci, *tmpci;
	int rc;

	read_lock(&inode_hash_lock);
	ci = ksmbd_inode_lookup(fp);
	read_unlock(&inode_hash_lock);
	if (ci)
		return ci;

	ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
	if (!ci)
		return NULL;

	rc = ksmbd_inode_init(ci, fp);
	if (rc) {
		pr_err("inode initialization failed\n");
		kfree(ci);
		return NULL;
	}

	write_lock(&inode_hash_lock);
	tmpci = ksmbd_inode_lookup(fp);
	if (!tmpci) {
		ksmbd_inode_hash(ci);
	} else {
		kfree(ci);
		ci = tmpci;
	}
	write_unlock(&inode_hash_lock);
	return ci;
}

static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
	ksmbd_inode_unhash(ci);
	kfree(ci);
}

static void ksmbd_inode_put(struct ksmbd_inode *ci)
{
	if (atomic_dec_and_test(&ci->m_count))
		ksmbd_inode_free(ci);
}

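/*
 * Allocate the shared inode hash table: 16384 hlist heads, sized and
 * indexed via inode_hash_shift/inode_hash_mask and backed by vmalloc.
 */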
int __init ksmbd_inode_hash_init(void)
{
	unsigned int loop;
	unsigned long numentries = 16384;
	unsigned long bucketsize = sizeof(struct hlist_head);
	unsigned long size;

	inode_hash_shift = ilog2(numentries);
	inode_hash_mask = (1 << inode_hash_shift) - 1;

	size = bucketsize << inode_hash_shift;

	/* init master fp hash table */
	inode_hashtable = vmalloc(size);
	if (!inode_hashtable)
		return -ENOMEM;

	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
	return 0;
}

void ksmbd_release_inode_hash(void)
{
	vfree(inode_hashtable);
}

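/*
 * Drop @fp's reference on its ksmbd_inode.  A stream open flagged for
 * delete-on-close removes its backing xattr first; when the last
 * reference goes away, a pending S_DEL_ON_CLS/S_DEL_PENDING request is
 * honoured by unlinking the dentry before the ksmbd_inode is freed.
 */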
static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
	struct dentry *dir, *dentry;
	struct ksmbd_inode *ci = fp->f_ci;
	int err;
	struct file *filp;

	filp = fp->filp;
	if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
		err = ksmbd_vfs_remove_xattr(file_mnt_user_ns(filp),
					     filp->f_path.dentry,
					     fp->stream.name);
		if (err)
			pr_err("remove xattr failed : %s\n",
			       fp->stream.name);
	}

	if (atomic_dec_and_test(&ci->m_count)) {
		write_lock(&ci->m_lock);
		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
			dentry = filp->f_path.dentry;
			dir = dentry->d_parent;
			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
			write_unlock(&ci->m_lock);
			ksmbd_vfs_unlink(file_mnt_user_ns(filp), dir, dentry);
			write_lock(&ci->m_lock);
		}
		write_unlock(&ci->m_lock);

		ksmbd_inode_free(ci);
	}
}

static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	if (!has_file_id(fp->persistent_id))
		return;

	write_lock(&global_ft.lock);
	idr_remove(global_ft.idr, fp->persistent_id);
	write_unlock(&global_ft.lock);
}

static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	if (!has_file_id(fp->volatile_id))
		return;

	write_lock(&fp->f_ci->m_lock);
	list_del_init(&fp->node);
	write_unlock(&fp->f_ci->m_lock);

	write_lock(&ft->lock);
	idr_remove(ft->idr, fp->volatile_id);
	write_unlock(&ft->lock);
}

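/*
 * Final teardown of a ksmbd_file: return its fd-limit slot, remove the
 * persistent and volatile ids from their tables, drop oplock state and
 * the inode reference, fput() the VFS file, free any byte-range locks
 * still queued on it, and release the slab object.
 */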
static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	struct file *filp;
	struct ksmbd_lock *smb_lock, *tmp_lock;

	fd_limit_close();
	__ksmbd_remove_durable_fd(fp);
	__ksmbd_remove_fd(ft, fp);

	close_id_del_oplock(fp);
	filp = fp->filp;

	__ksmbd_inode_close(fp);
	if (!IS_ERR_OR_NULL(filp))
		fput(filp);

	/* Because the reference count of fp is 0, it is guaranteed that
	 * there are no remaining accesses to fp->lock_list.
	 */
	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
		spin_lock(&fp->conn->llist_lock);
		list_del(&smb_lock->clist);
		spin_unlock(&fp->conn->llist_lock);

		list_del(&smb_lock->flist);
		locks_free_lock(smb_lock->fl);
		kfree(smb_lock);
	}

	if (ksmbd_stream_fd(fp))
		kfree(fp->stream.name);
	kmem_cache_free(filp_cache, fp);
}

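/*
 * Take a reference on @fp unless its refcount has already dropped to
 * zero (the file is being torn down), in which case NULL is returned
 * and the caller must treat the lookup as a miss.
 */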
static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
	if (!atomic_inc_not_zero(&fp->refcount))
		return NULL;
	return fp;
}

static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
					    u64 id)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id))
		return NULL;

	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp)
		fp = ksmbd_fp_get(fp);
	read_unlock(&ft->lock);
	return fp;
}

static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	__ksmbd_close_fd(&work->sess->file_table, fp);
	atomic_dec(&work->conn->stats.open_files_count);
}

static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
	struct ksmbd_work *cancel_work, *ctmp;

	spin_lock(&fp->f_lock);
	list_for_each_entry_safe(cancel_work, ctmp, &fp->blocked_works,
				 fp_entry) {
		list_del(&cancel_work->fp_entry);
		cancel_work->state = KSMBD_WORK_CLOSED;
		cancel_work->cancel_fn(cancel_work->cancel_argv);
	}
	spin_unlock(&fp->f_lock);
}

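/*
 * Handle a close of volatile id @id: cancel any works blocked on the
 * file, then drop the file table's reference.  The file is destroyed
 * here only when that was the last reference; otherwise -EINVAL is
 * returned (also for an unknown id) and any outstanding user completes
 * the teardown via ksmbd_fd_put().
 */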
int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file	*fp;
	struct ksmbd_file_table	*ft;

	if (!has_file_id(id))
		return 0;

	ft = &work->sess->file_table;
	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp) {
		set_close_state_blocked_works(fp);

		if (!atomic_dec_and_test(&fp->refcount))
			fp = NULL;
	}
	read_unlock(&ft->lock);

	if (!fp)
		return -EINVAL;

	__put_fd_final(work, fp);
	return 0;
}

void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	if (!fp)
		return;

	if (!atomic_dec_and_test(&fp->refcount))
		return;
	__put_fd_final(work, fp);
}

static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
{
	if (!fp)
		return false;
	if (fp->tcon != tcon)
		return false;
	return true;
}

struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
{
	return __ksmbd_lookup_fd(&work->sess->file_table, id);
}

struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);

	if (__sanity_check(work->tcon, fp))
		return fp;

	ksmbd_fd_put(work, fp);
	return NULL;
}

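/*
 * Look up a file by volatile id and persistent id.  An unset volatile
 * id falls back to the compound request's cached ids, and the entry is
 * only returned if it belongs to the work's tree connection and its
 * persistent id matches; otherwise the reference is dropped again.
 */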
struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
					u64 pid)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id)) {
		id = work->compound_fid;
		pid = work->compound_pfid;
	}

	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
	if (!__sanity_check(work->tcon, fp)) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	if (fp->persistent_id != pid) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	return fp;
}

struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
	return __ksmbd_lookup_fd(&global_ft, id);
}

struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
	struct ksmbd_file	*fp = NULL;
	unsigned int		id;

	read_lock(&global_ft.lock);
	idr_for_each_entry(global_ft.idr, fp, id) {
		if (!memcmp(fp->create_guid,
			    cguid,
			    SMB2_CREATE_GUID_SIZE)) {
			fp = ksmbd_fp_get(fp);
			break;
		}
	}
	read_unlock(&global_ft.lock);

	return fp;
}

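/*
 * Find an open ksmbd_file backed by @inode by walking the ksmbd_inode's
 * m_fp_list.  The ksmbd_inode reference taken by the lookup is dropped
 * before returning; the returned file, if any, carries its own reference.
 */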
struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
{
	struct ksmbd_file	*lfp;
	struct ksmbd_inode	*ci;

	ci = ksmbd_inode_lookup_by_vfsinode(inode);
	if (!ci)
		return NULL;

	read_lock(&ci->m_lock);
	list_for_each_entry(lfp, &ci->m_fp_list, node) {
		if (inode == file_inode(lfp->filp)) {
			atomic_dec(&ci->m_count);
			lfp = ksmbd_fp_get(lfp);
			read_unlock(&ci->m_lock);
			return lfp;
		}
	}
	atomic_dec(&ci->m_count);
	read_unlock(&ci->m_lock);
	return NULL;
}

#define OPEN_ID_TYPE_VOLATILE_ID	(0)
#define OPEN_ID_TYPE_PERSISTENT_ID	(1)

static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
{
	if (type == OPEN_ID_TYPE_VOLATILE_ID)
		fp->volatile_id = id;
	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
		fp->persistent_id = id;
}

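/*
 * Allocate a new volatile or persistent id for @fp from @ft's idr using
 * idr_alloc_cyclic() under idr_preload().  A volatile allocation first
 * charges the global fd limit and fails with -EMFILE once it is
 * depleted.
 */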
static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
		     int type)
{
	u64			id = 0;
	int			ret;

	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
		__open_id_set(fp, KSMBD_NO_FID, type);
		return -EMFILE;
	}

	idr_preload(GFP_KERNEL);
	write_lock(&ft->lock);
	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
	if (ret >= 0) {
		id = ret;
		ret = 0;
	} else {
		id = KSMBD_NO_FID;
		fd_limit_close();
	}

	__open_id_set(fp, id, type);
	write_unlock(&ft->lock);
	idr_preload_end();
	return ret;
}

unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
	return fp->persistent_id;
}

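/*
 * Wrap an already opened struct file in a new ksmbd_file: allocate the
 * object from filp_cache, take a ksmbd_inode reference, and assign a
 * volatile id in the session's file table.  On error the object is
 * freed and an ERR_PTR is returned.
 */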
struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
{
	struct ksmbd_file *fp;
	int ret;

	fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
	if (!fp) {
		pr_err("Failed to allocate memory\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&fp->blocked_works);
	INIT_LIST_HEAD(&fp->node);
	INIT_LIST_HEAD(&fp->lock_list);
	spin_lock_init(&fp->f_lock);
	atomic_set(&fp->refcount, 1);

	fp->filp		= filp;
	fp->conn		= work->sess->conn;
	fp->tcon		= work->tcon;
	fp->volatile_id		= KSMBD_NO_FID;
	fp->persistent_id	= KSMBD_NO_FID;
	fp->f_ci		= ksmbd_inode_get(fp);

	if (!fp->f_ci) {
		ret = -ENOMEM;
		goto err_out;
	}

	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (ret) {
		ksmbd_inode_put(fp->f_ci);
		goto err_out;
	}

	atomic_inc(&work->conn->stats.open_files_count);
	return fp;

err_out:
	kmem_cache_free(filp_cache, fp);
	return ERR_PTR(ret);
}

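/*
 * Walk every entry in @ft and close those the @skip callback does not
 * exclude.  Returns the number of files actually torn down so callers
 * can adjust the connection's open_files_count.
 */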
static int
__close_file_table_ids(struct ksmbd_file_table *ft,
		       struct ksmbd_tree_connect *tcon,
		       bool (*skip)(struct ksmbd_tree_connect *tcon,
				    struct ksmbd_file *fp))
{
	unsigned int			id;
	struct ksmbd_file		*fp;
	int				num = 0;

	idr_for_each_entry(ft->idr, fp, id) {
		if (skip(tcon, fp))
			continue;

		set_close_state_blocked_works(fp);

		if (!atomic_dec_and_test(&fp->refcount))
			continue;
		__ksmbd_close_fd(ft, fp);
		num++;
	}
	return num;
}

static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
			       struct ksmbd_file *fp)
{
	return fp->tcon != tcon;
}

static bool session_fd_check(struct ksmbd_tree_connect *tcon,
			     struct ksmbd_file *fp)
{
	return false;
}

void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 tree_conn_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

void ksmbd_close_session_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 session_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

int ksmbd_init_global_file_table(void)
{
	return ksmbd_init_file_table(&global_ft);
}

void ksmbd_free_global_file_table(void)
{
	struct ksmbd_file	*fp = NULL;
	unsigned int		id;

	idr_for_each_entry(global_ft.idr, fp, id) {
		__ksmbd_remove_durable_fd(fp);
		kmem_cache_free(filp_cache, fp);
	}

	ksmbd_destroy_file_table(&global_ft);
}

int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
	ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
	if (!ft->idr)
		return -ENOMEM;

	idr_init(ft->idr);
	rwlock_init(&ft->lock);
	return 0;
}

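/*
 * Tear down a per-session or global file table: force-close any ids
 * still present (using session_fd_check, which skips nothing), then
 * destroy and free the idr.
 */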
void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
{
	if (!ft->idr)
		return;

	__close_file_table_ids(ft, NULL, session_fd_check);
	idr_destroy(ft->idr);
	kfree(ft->idr);
	ft->idr = NULL;
}

int ksmbd_init_file_cache(void)
{
	filp_cache = kmem_cache_create("ksmbd_file_cache",
				       sizeof(struct ksmbd_file), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!filp_cache)
		goto out;

	return 0;

out:
	pr_err("failed to allocate file cache\n");
	return -ENOMEM;
}

void ksmbd_exit_file_cache(void)
{
	kmem_cache_destroy(filp_cache);
}