1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/mount.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/list.h>
19 #include <linux/seq_file.h>
20 #include <linux/vfs.h>
21 #include <linux/mempool.h>
22 #include <linux/delay.h>
23 #include <linux/kthread.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/random.h>
27 #include <linux/uuid.h>
28 #include <linux/xattr.h>
29 #include <uapi/linux/magic.h>
30 #include <net/ipv6.h>
31 #include "cifsfs.h"
32 #include "cifspdu.h"
33 #define DECLARE_GLOBALS_HERE
34 #include "cifsglob.h"
35 #include "cifsproto.h"
36 #include "cifs_debug.h"
37 #include "cifs_fs_sb.h"
38 #include <linux/mm.h>
39 #include <linux/key-type.h>
40 #include "cifs_spnego.h"
41 #include "fscache.h"
42 #ifdef CONFIG_CIFS_DFS_UPCALL
43 #include "dfs_cache.h"
44 #endif
45 #ifdef CONFIG_CIFS_SWN_UPCALL
46 #include "netlink.h"
47 #endif
48 #include "fs_context.h"
49 #include "cached_dir.h"
50
51 /*
52 * DOS dates from 1980/1/1 through 2107/12/31
53 * Protocol specifications indicate the range should be to 119, which
54 * limits maximum year to 2099. But this range has not been checked.
55 */
56 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
57 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
58 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
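
/*
 * The DOS date word packs (year - 1980) into bits 15:9, the month into
 * bits 8:5 and the day into bits 4:0; the time word packs hours into
 * bits 15:11, minutes into bits 10:5 and seconds/2 into bits 4:0.  For
 * example, 2107/12/31 23:59:58 encodes as the SMB_DATE_MAX/SMB_TIME_MAX
 * pair above: (127 << 9 | 12 << 5 | 31) and (23 << 11 | 59 << 5 | 29).
 */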
59
60 int cifsFYI = 0;
61 bool traceSMB;
62 bool enable_oplocks = true;
63 bool linuxExtEnabled = true;
64 bool lookupCacheEnabled = true;
65 bool disable_legacy_dialects; /* false by default */
66 bool enable_gcm_256 = true;
67 bool require_gcm_256; /* false by default */
68 bool enable_negotiate_signing; /* false by default */
69 unsigned int global_secflags = CIFSSEC_DEF;
70 /* unsigned int ntlmv2_support = 0; */
71 unsigned int sign_CIFS_PDUs = 1;
72
73 /*
74 * Global transaction id (XID) information
75 */
76 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */
77 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
78 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */
79 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
80
81 /*
82 * Global counters, updated atomically
83 */
84 atomic_t sesInfoAllocCount;
85 atomic_t tconInfoAllocCount;
86 atomic_t tcpSesNextId;
87 atomic_t tcpSesAllocCount;
88 atomic_t tcpSesReconnectCount;
89 atomic_t tconInfoReconnectCount;
90
91 atomic_t mid_count;
92 atomic_t buf_alloc_count;
93 atomic_t small_buf_alloc_count;
94 #ifdef CONFIG_CIFS_STATS2
95 atomic_t total_buf_alloc_count;
96 atomic_t total_small_buf_alloc_count;
97 #endif /* STATS2 */
98 struct list_head cifs_tcp_ses_list;
99 spinlock_t cifs_tcp_ses_lock;
100 static const struct super_operations cifs_super_ops;
101 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
102 module_param(CIFSMaxBufSize, uint, 0444);
103 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
104 "for CIFS requests. "
105 "Default: 16384 Range: 8192 to 130048");
106 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
107 module_param(cifs_min_rcv, uint, 0444);
108 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
109 "1 to 64");
110 unsigned int cifs_min_small = 30;
111 module_param(cifs_min_small, uint, 0444);
112 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
113 "Range: 2 to 256");
114 unsigned int cifs_max_pending = CIFS_MAX_REQ;
115 module_param(cifs_max_pending, uint, 0444);
116 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
117 "CIFS/SMB1 dialect (N/A for SMB3) "
118 "Default: 32767 Range: 2 to 32767.");
119 #ifdef CONFIG_CIFS_STATS2
120 unsigned int slow_rsp_threshold = 1;
121 module_param(slow_rsp_threshold, uint, 0644);
122 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
123 "before logging that a response is delayed. "
124 "Default: 1 (if set to 0 disables msg).");
125 #endif /* STATS2 */
126
127 module_param(enable_oplocks, bool, 0644);
128 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
129
130 module_param(enable_gcm_256, bool, 0644);
131 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
132
133 module_param(require_gcm_256, bool, 0644);
134 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
135
136 module_param(enable_negotiate_signing, bool, 0644);
137 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
138
139 module_param(disable_legacy_dialects, bool, 0644);
140 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
141 "helpful to restrict the ability to "
142 "override the default dialects (SMB2.1, "
143 "SMB3 and SMB3.02) on mount with old "
144 "dialects (CIFS/SMB1 and SMB2) since "
145 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
146 " and less secure. Default: n/N/0");
147
148 extern mempool_t *cifs_sm_req_poolp;
149 extern mempool_t *cifs_req_poolp;
150 extern mempool_t *cifs_mid_poolp;
151
152 struct workqueue_struct *cifsiod_wq;
153 struct workqueue_struct *decrypt_wq;
154 struct workqueue_struct *fileinfo_put_wq;
155 struct workqueue_struct *cifsoplockd_wq;
156 struct workqueue_struct *deferredclose_wq;
157 __u32 cifs_lock_secret;
158
159 /*
160 * Bumps refcount for cifs super block.
161 * Note that it should only be called if a reference to the VFS super block is
162 * already held, e.g. in open-type syscalls context. Otherwise it can race with
163 * atomic_dec_and_test in deactivate_locked_super.
164 */
165 void
166 cifs_sb_active(struct super_block *sb)
167 {
168 struct cifs_sb_info *server = CIFS_SB(sb);
169
170 if (atomic_inc_return(&server->active) == 1)
171 atomic_inc(&sb->s_active);
172 }
173
174 void
175 cifs_sb_deactive(struct super_block *sb)
176 {
177 struct cifs_sb_info *server = CIFS_SB(sb);
178
179 if (atomic_dec_and_test(&server->active))
180 deactivate_super(sb);
181 }
182
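/*
 * Fill in the superblock once the tree connection is established: mount
 * flags (POSIX ACLs, read-only for snapshot mounts), maximum file size,
 * timestamp granularity and range, backing device info and readahead
 * size, and finally the root inode and dentry.
 */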
183 static int
184 cifs_read_super(struct super_block *sb)
185 {
186 struct inode *inode;
187 struct cifs_sb_info *cifs_sb;
188 struct cifs_tcon *tcon;
189 struct timespec64 ts;
190 int rc = 0;
191
192 cifs_sb = CIFS_SB(sb);
193 tcon = cifs_sb_master_tcon(cifs_sb);
194
195 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
196 sb->s_flags |= SB_POSIXACL;
197
198 if (tcon->snapshot_time)
199 sb->s_flags |= SB_RDONLY;
200
201 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
202 sb->s_maxbytes = MAX_LFS_FILESIZE;
203 else
204 sb->s_maxbytes = MAX_NON_LFS;
205
206 /*
207 * Some very old servers like DOS and OS/2 used 2 second granularity
208 * (while all current servers use 100ns granularity - see MS-DTYP)
209 * but 1 second is the maximum allowed granularity for the VFS
210 * so for old servers set time granularity to 1 second while for
211 * everything else (current servers) set it to 100ns.
212 */
213 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
214 ((tcon->ses->capabilities &
215 tcon->ses->server->vals->cap_nt_find) == 0) &&
216 !tcon->unix_ext) {
217 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
218 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
219 sb->s_time_min = ts.tv_sec;
220 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
221 cpu_to_le16(SMB_TIME_MAX), 0);
222 sb->s_time_max = ts.tv_sec;
223 } else {
224 /*
225 * Almost every server, including all SMB2+, uses DCE TIME
226 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
227 */
228 sb->s_time_gran = 100;
229 ts = cifs_NTtimeToUnix(0);
230 sb->s_time_min = ts.tv_sec;
231 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
232 sb->s_time_max = ts.tv_sec;
233 }
234
235 sb->s_magic = CIFS_SUPER_MAGIC;
236 sb->s_op = &cifs_super_ops;
237 sb->s_xattr = cifs_xattr_handlers;
238 rc = super_setup_bdi(sb);
239 if (rc)
240 goto out_no_root;
241 /* tune readahead according to rsize if readahead size not set on mount */
242 if (cifs_sb->ctx->rsize == 0)
243 cifs_sb->ctx->rsize =
244 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
245 if (cifs_sb->ctx->rasize)
246 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
247 else
248 sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
249
250 sb->s_blocksize = CIFS_MAX_MSGSIZE;
251 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
252 inode = cifs_root_iget(sb);
253
254 if (IS_ERR(inode)) {
255 rc = PTR_ERR(inode);
256 goto out_no_root;
257 }
258
259 if (tcon->nocase)
260 sb->s_d_op = &cifs_ci_dentry_ops;
261 else
262 sb->s_d_op = &cifs_dentry_ops;
263
264 sb->s_root = d_make_root(inode);
265 if (!sb->s_root) {
266 rc = -ENOMEM;
267 goto out_no_root;
268 }
269
270 #ifdef CONFIG_CIFS_NFSD_EXPORT
271 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
272 cifs_dbg(FYI, "export ops supported\n");
273 sb->s_export_op = &cifs_export_ops;
274 }
275 #endif /* CONFIG_CIFS_NFSD_EXPORT */
276
277 return 0;
278
279 out_no_root:
280 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
281 return rc;
282 }
283
284 static void cifs_kill_sb(struct super_block *sb)
285 {
286 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
287
288 /*
289 * We need to release all dentries for the cached directories
290 * before we kill the sb.
291 */
292 if (cifs_sb->root) {
293 close_all_cached_dirs(cifs_sb);
294
295 /* finally release root dentry */
296 dput(cifs_sb->root);
297 cifs_sb->root = NULL;
298 }
299
300 kill_anon_super(sb);
301 cifs_umount(cifs_sb);
302 }
303
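/*
 * Report filesystem statistics for statfs(2).  The name length and
 * f_fsid come from attributes cached on the tree connection, while the
 * block counts are filled in by the per-dialect ->queryfs() call.
 */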
304 static int
305 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
306 {
307 struct super_block *sb = dentry->d_sb;
308 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
309 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
310 struct TCP_Server_Info *server = tcon->ses->server;
311 unsigned int xid;
312 int rc = 0;
313
314 xid = get_xid();
315
316 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
317 buf->f_namelen =
318 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
319 else
320 buf->f_namelen = PATH_MAX;
321
322 buf->f_fsid.val[0] = tcon->vol_serial_number;
323 /* use part of the volume create time for extra randomness, see man statfs */
324 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
325
326 buf->f_files = 0; /* undefined */
327 buf->f_ffree = 0; /* unlimited */
328
329 if (server->ops->queryfs)
330 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
331
332 free_xid(xid);
333 return rc;
334 }
335
336 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
337 {
338 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
339 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
340 struct TCP_Server_Info *server = tcon->ses->server;
341
342 if (server->ops->fallocate)
343 return server->ops->fallocate(file, tcon, mode, off, len);
344
345 return -EOPNOTSUPP;
346 }
347
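/*
 * With the "noperm" mount option only execute access is checked on the
 * client and everything else is left to the server; otherwise fall back
 * to generic_permission() against the client-visible mode bits.
 */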
348 static int cifs_permission(struct user_namespace *mnt_userns,
349 struct inode *inode, int mask)
350 {
351 struct cifs_sb_info *cifs_sb;
352
353 cifs_sb = CIFS_SB(inode->i_sb);
354
355 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
356 if ((mask & MAY_EXEC) && !execute_ok(inode))
357 return -EACCES;
358 else
359 return 0;
360 } else /* file mode might have been restricted at mount time
361 on the client (above and beyond ACL on servers) for
362 servers which do not support setting and viewing mode bits,
363 so allowing client to check permissions is useful */
364 return generic_permission(&init_user_ns, inode, mask);
365 }
366
367 static struct kmem_cache *cifs_inode_cachep;
368 static struct kmem_cache *cifs_req_cachep;
369 static struct kmem_cache *cifs_mid_cachep;
370 static struct kmem_cache *cifs_sm_req_cachep;
371 mempool_t *cifs_sm_req_poolp;
372 mempool_t *cifs_req_poolp;
373 mempool_t *cifs_mid_poolp;
374
375 static struct inode *
376 cifs_alloc_inode(struct super_block *sb)
377 {
378 struct cifsInodeInfo *cifs_inode;
379 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
380 if (!cifs_inode)
381 return NULL;
382 cifs_inode->cifsAttrs = 0x20; /* default */
383 cifs_inode->time = 0;
384 /*
385 * Until the file is open and we have gotten oplock info back from the
386 * server, can not assume caching of file data or metadata.
387 */
388 cifs_set_oplock_level(cifs_inode, 0);
389 cifs_inode->flags = 0;
390 spin_lock_init(&cifs_inode->writers_lock);
391 cifs_inode->writers = 0;
392 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
393 cifs_inode->server_eof = 0;
394 cifs_inode->uniqueid = 0;
395 cifs_inode->createtime = 0;
396 cifs_inode->epoch = 0;
397 spin_lock_init(&cifs_inode->open_file_lock);
398 generate_random_uuid(cifs_inode->lease_key);
399 cifs_inode->symlink_target = NULL;
400
401 /*
402 * Can not set i_flags here - they get immediately overwritten to zero
403 * by the VFS.
404 */
405 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
406 INIT_LIST_HEAD(&cifs_inode->openFileList);
407 INIT_LIST_HEAD(&cifs_inode->llist);
408 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
409 spin_lock_init(&cifs_inode->deferred_lock);
410 return &cifs_inode->netfs.inode;
411 }
412
413 static void
414 cifs_free_inode(struct inode *inode)
415 {
416 struct cifsInodeInfo *cinode = CIFS_I(inode);
417
418 if (S_ISLNK(inode->i_mode))
419 kfree(cinode->symlink_target);
420 kmem_cache_free(cifs_inode_cachep, cinode);
421 }
422
423 static void
424 cifs_evict_inode(struct inode *inode)
425 {
426 truncate_inode_pages_final(&inode->i_data);
427 if (inode->i_state & I_PINNING_FSCACHE_WB)
428 cifs_fscache_unuse_inode_cookie(inode, true);
429 cifs_fscache_release_inode_cookie(inode);
430 clear_inode(inode);
431 }
432
433 static void
434 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
435 {
436 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
437 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
438
439 seq_puts(s, ",addr=");
440
441 switch (server->dstaddr.ss_family) {
442 case AF_INET:
443 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
444 break;
445 case AF_INET6:
446 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
447 if (sa6->sin6_scope_id)
448 seq_printf(s, "%%%u", sa6->sin6_scope_id);
449 break;
450 default:
451 seq_puts(s, "(unknown)");
452 }
453 if (server->rdma)
454 seq_puts(s, ",rdma");
455 }
456
457 static void
458 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
459 {
460 if (ses->sectype == Unspecified) {
461 if (ses->user_name == NULL)
462 seq_puts(s, ",sec=none");
463 return;
464 }
465
466 seq_puts(s, ",sec=");
467
468 switch (ses->sectype) {
469 case NTLMv2:
470 seq_puts(s, "ntlmv2");
471 break;
472 case Kerberos:
473 seq_puts(s, "krb5");
474 break;
475 case RawNTLMSSP:
476 seq_puts(s, "ntlmssp");
477 break;
478 default:
479 /* shouldn't ever happen */
480 seq_puts(s, "unknown");
481 break;
482 }
483
484 if (ses->sign)
485 seq_puts(s, "i");
486
487 if (ses->sectype == Kerberos)
488 seq_printf(s, ",cruid=%u",
489 from_kuid_munged(&init_user_ns, ses->cred_uid));
490 }
491
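/*
 * Translate the caching-related mount flags back into the corresponding
 * cache= mount option value (strict, none, singleclient, ro or loose)
 * for /proc/mounts.
 */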
492 static void
493 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
494 {
495 seq_puts(s, ",cache=");
496
497 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
498 seq_puts(s, "strict");
499 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
500 seq_puts(s, "none");
501 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
502 seq_puts(s, "singleclient"); /* assume only one client access */
503 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
504 seq_puts(s, "ro"); /* read only caching assumed */
505 else
506 seq_puts(s, "loose");
507 }
508
509 /*
510 * cifs_show_devname() is used so we show the mount device name with correct
511 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
512 */
513 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
514 {
515 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
516 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
517
518 if (devname == NULL)
519 seq_puts(m, "none");
520 else {
521 convert_delimiter(devname, '/');
522 /* escape all spaces in share names */
523 seq_escape(m, devname, " \t");
524 kfree(devname);
525 }
526 return 0;
527 }
528
529 /*
530 * cifs_show_options() is for displaying mount options in /proc/mounts.
531 * Not all settable options are displayed but most of the important
532 * ones are.
533 */
534 static int
535 cifs_show_options(struct seq_file *s, struct dentry *root)
536 {
537 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
538 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
539 struct sockaddr *srcaddr;
540 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
541
542 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
543 cifs_show_security(s, tcon->ses);
544 cifs_show_cache_flavor(s, cifs_sb);
545
546 if (tcon->no_lease)
547 seq_puts(s, ",nolease");
548 if (cifs_sb->ctx->multiuser)
549 seq_puts(s, ",multiuser");
550 else if (tcon->ses->user_name)
551 seq_show_option(s, "username", tcon->ses->user_name);
552
553 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
554 seq_show_option(s, "domain", tcon->ses->domainName);
555
556 if (srcaddr->sa_family != AF_UNSPEC) {
557 struct sockaddr_in *saddr4;
558 struct sockaddr_in6 *saddr6;
559 saddr4 = (struct sockaddr_in *)srcaddr;
560 saddr6 = (struct sockaddr_in6 *)srcaddr;
561 if (srcaddr->sa_family == AF_INET6)
562 seq_printf(s, ",srcaddr=%pI6c",
563 &saddr6->sin6_addr);
564 else if (srcaddr->sa_family == AF_INET)
565 seq_printf(s, ",srcaddr=%pI4",
566 &saddr4->sin_addr.s_addr);
567 else
568 seq_printf(s, ",srcaddr=BAD-AF:%i",
569 (int)(srcaddr->sa_family));
570 }
571
572 seq_printf(s, ",uid=%u",
573 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
574 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
575 seq_puts(s, ",forceuid");
576 else
577 seq_puts(s, ",noforceuid");
578
579 seq_printf(s, ",gid=%u",
580 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
581 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
582 seq_puts(s, ",forcegid");
583 else
584 seq_puts(s, ",noforcegid");
585
586 cifs_show_address(s, tcon->ses->server);
587
588 if (!tcon->unix_ext)
589 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
590 cifs_sb->ctx->file_mode,
591 cifs_sb->ctx->dir_mode);
592 if (cifs_sb->ctx->iocharset)
593 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
594 if (tcon->seal)
595 seq_puts(s, ",seal");
596 else if (tcon->ses->server->ignore_signature)
597 seq_puts(s, ",signloosely");
598 if (tcon->nocase)
599 seq_puts(s, ",nocase");
600 if (tcon->nodelete)
601 seq_puts(s, ",nodelete");
602 if (cifs_sb->ctx->no_sparse)
603 seq_puts(s, ",nosparse");
604 if (tcon->local_lease)
605 seq_puts(s, ",locallease");
606 if (tcon->retry)
607 seq_puts(s, ",hard");
608 else
609 seq_puts(s, ",soft");
610 if (tcon->use_persistent)
611 seq_puts(s, ",persistenthandles");
612 else if (tcon->use_resilient)
613 seq_puts(s, ",resilienthandles");
614 if (tcon->posix_extensions)
615 seq_puts(s, ",posix");
616 else if (tcon->unix_ext)
617 seq_puts(s, ",unix");
618 else
619 seq_puts(s, ",nounix");
620 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
621 seq_puts(s, ",nodfs");
622 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
623 seq_puts(s, ",posixpaths");
624 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
625 seq_puts(s, ",setuids");
626 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
627 seq_puts(s, ",idsfromsid");
628 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
629 seq_puts(s, ",serverino");
630 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
631 seq_puts(s, ",rwpidforward");
632 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
633 seq_puts(s, ",forcemand");
634 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
635 seq_puts(s, ",nouser_xattr");
636 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
637 seq_puts(s, ",mapchars");
638 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
639 seq_puts(s, ",mapposix");
640 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
641 seq_puts(s, ",sfu");
642 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
643 seq_puts(s, ",nobrl");
644 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
645 seq_puts(s, ",nohandlecache");
646 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
647 seq_puts(s, ",modefromsid");
648 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
649 seq_puts(s, ",cifsacl");
650 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
651 seq_puts(s, ",dynperm");
652 if (root->d_sb->s_flags & SB_POSIXACL)
653 seq_puts(s, ",acl");
654 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
655 seq_puts(s, ",mfsymlinks");
656 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
657 seq_puts(s, ",fsc");
658 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
659 seq_puts(s, ",nostrictsync");
660 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
661 seq_puts(s, ",noperm");
662 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
663 seq_printf(s, ",backupuid=%u",
664 from_kuid_munged(&init_user_ns,
665 cifs_sb->ctx->backupuid));
666 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
667 seq_printf(s, ",backupgid=%u",
668 from_kgid_munged(&init_user_ns,
669 cifs_sb->ctx->backupgid));
670
671 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
672 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
673 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
674 if (cifs_sb->ctx->rasize)
675 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
676 if (tcon->ses->server->min_offload)
677 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
678 seq_printf(s, ",echo_interval=%lu",
679 tcon->ses->server->echo_interval / HZ);
680
681 /* Only display the following if overridden on mount */
682 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
683 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
684 if (tcon->ses->server->tcp_nodelay)
685 seq_puts(s, ",tcpnodelay");
686 if (tcon->ses->server->noautotune)
687 seq_puts(s, ",noautotune");
688 if (tcon->ses->server->noblocksnd)
689 seq_puts(s, ",noblocksend");
690
691 if (tcon->snapshot_time)
692 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
693 if (tcon->handle_timeout)
694 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
695
696 /*
697 * Display file and directory attribute timeout in seconds.
698 * If the file and directory attribute timeouts are the same then
699 * actimeo was likely specified on mount
700 */
701 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
702 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
703 else {
704 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
705 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
706 }
707 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
708
709 if (tcon->ses->chan_max > 1)
710 seq_printf(s, ",multichannel,max_channels=%zu",
711 tcon->ses->chan_max);
712
713 if (tcon->use_witness)
714 seq_puts(s, ",witness");
715
716 return 0;
717 }
718
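/*
 * Called for a forced unmount (umount -f).  If this is the only mount of
 * the share, mark the tree connection as exiting and wake up any tasks
 * blocked waiting for responses so the unmount can make progress.
 */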
719 static void cifs_umount_begin(struct super_block *sb)
720 {
721 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
722 struct cifs_tcon *tcon;
723
724 if (cifs_sb == NULL)
725 return;
726
727 tcon = cifs_sb_master_tcon(cifs_sb);
728
729 spin_lock(&cifs_tcp_ses_lock);
730 spin_lock(&tcon->tc_lock);
731 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
732 /* we have other mounts to same share or we have
733 already tried to force umount this and woken up
734 all waiting network requests, nothing to do */
735 spin_unlock(&tcon->tc_lock);
736 spin_unlock(&cifs_tcp_ses_lock);
737 return;
738 } else if (tcon->tc_count == 1)
739 tcon->status = TID_EXITING;
740 spin_unlock(&tcon->tc_lock);
741 spin_unlock(&cifs_tcp_ses_lock);
742
743 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
744 /* cancel_notify_requests(tcon); */
745 if (tcon->ses && tcon->ses->server) {
746 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
747 wake_up_all(&tcon->ses->server->request_q);
748 wake_up_all(&tcon->ses->server->response_q);
749 msleep(1); /* yield */
750 /* we have to kick the requests once more */
751 wake_up_all(&tcon->ses->server->response_q);
752 msleep(1);
753 }
754
755 return;
756 }
757
758 #ifdef CONFIG_CIFS_STATS2
759 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
760 {
761 /* BB FIXME */
762 return 0;
763 }
764 #endif
765
766 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
767 {
768 fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
769 return 0;
770 }
771
772 static int cifs_drop_inode(struct inode *inode)
773 {
774 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
775
776 /* no serverino => unconditional eviction */
777 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
778 generic_drop_inode(inode);
779 }
780
781 static const struct super_operations cifs_super_ops = {
782 .statfs = cifs_statfs,
783 .alloc_inode = cifs_alloc_inode,
784 .write_inode = cifs_write_inode,
785 .free_inode = cifs_free_inode,
786 .drop_inode = cifs_drop_inode,
787 .evict_inode = cifs_evict_inode,
788 /* .show_path = cifs_show_path, */ /* Would we ever need show path? */
789 .show_devname = cifs_show_devname,
790 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
791 function unless later we add lazy close of inodes or unless the
792 kernel forgets to call us with the same number of releases (closes)
793 as opens */
794 .show_options = cifs_show_options,
795 .umount_begin = cifs_umount_begin,
796 #ifdef CONFIG_CIFS_STATS2
797 .show_stats = cifs_show_stats,
798 #endif
799 };
800
801 /*
802 * Get root dentry from superblock according to prefix path mount option.
803 * Return dentry with refcount + 1 on success and NULL otherwise.
804 */
805 static struct dentry *
806 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
807 {
808 struct dentry *dentry;
809 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
810 char *full_path = NULL;
811 char *s, *p;
812 char sep;
813
814 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
815 return dget(sb->s_root);
816
817 full_path = cifs_build_path_to_root(ctx, cifs_sb,
818 cifs_sb_master_tcon(cifs_sb), 0);
819 if (full_path == NULL)
820 return ERR_PTR(-ENOMEM);
821
822 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
823
824 sep = CIFS_DIR_SEP(cifs_sb);
825 dentry = dget(sb->s_root);
826 s = full_path;
827
828 do {
829 struct inode *dir = d_inode(dentry);
830 struct dentry *child;
831
832 if (!S_ISDIR(dir->i_mode)) {
833 dput(dentry);
834 dentry = ERR_PTR(-ENOTDIR);
835 break;
836 }
837
838 /* skip separators */
839 while (*s == sep)
840 s++;
841 if (!*s)
842 break;
843 p = s++;
844 /* next separator */
845 while (*s && *s != sep)
846 s++;
847
848 child = lookup_positive_unlocked(p, dentry, s - p);
849 dput(dentry);
850 dentry = child;
851 } while (!IS_ERR(dentry));
852 kfree(full_path);
853 return dentry;
854 }
855
856 static int cifs_set_super(struct super_block *sb, void *data)
857 {
858 struct cifs_mnt_data *mnt_data = data;
859 sb->s_fs_info = mnt_data->cifs_sb;
860 return set_anon_super(sb, NULL);
861 }
862
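/*
 * Common mount entry point for the "cifs" and "smb3" filesystem types:
 * duplicate the fs context, connect to the server and share via
 * cifs_mount(), find or create a matching superblock with sget(), and
 * resolve the root dentry (honoring any prefix path) for the new mount.
 */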
863 struct dentry *
864 cifs_smb3_do_mount(struct file_system_type *fs_type,
865 int flags, struct smb3_fs_context *old_ctx)
866 {
867 int rc;
868 struct super_block *sb = NULL;
869 struct cifs_sb_info *cifs_sb = NULL;
870 struct cifs_mnt_data mnt_data;
871 struct dentry *root;
872
873 /*
874 * Log the attempted mount operation: verbose (including flags) when
875 * cifsFYI debugging is enabled, otherwise a brief informational message
876 */
877 if (cifsFYI)
878 cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
879 else
880 cifs_info("Attempting to mount %s\n", old_ctx->UNC);
881
882 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
883 if (cifs_sb == NULL) {
884 root = ERR_PTR(-ENOMEM);
885 goto out;
886 }
887
888 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
889 if (!cifs_sb->ctx) {
890 root = ERR_PTR(-ENOMEM);
891 goto out;
892 }
893 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
894 if (rc) {
895 root = ERR_PTR(rc);
896 goto out;
897 }
898
899 rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
900 if (rc) {
901 root = ERR_PTR(rc);
902 goto out;
903 }
904
905 rc = cifs_setup_cifs_sb(cifs_sb);
906 if (rc) {
907 root = ERR_PTR(rc);
908 goto out;
909 }
910
911 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
912 if (rc) {
913 if (!(flags & SB_SILENT))
914 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
915 rc);
916 root = ERR_PTR(rc);
917 goto out;
918 }
919
920 mnt_data.ctx = cifs_sb->ctx;
921 mnt_data.cifs_sb = cifs_sb;
922 mnt_data.flags = flags;
923
924 /* BB should we make this contingent on mount parm? */
925 flags |= SB_NODIRATIME | SB_NOATIME;
926
927 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
928 if (IS_ERR(sb)) {
929 root = ERR_CAST(sb);
930 cifs_umount(cifs_sb);
931 cifs_sb = NULL;
932 goto out;
933 }
934
935 if (sb->s_root) {
936 cifs_dbg(FYI, "Use existing superblock\n");
937 cifs_umount(cifs_sb);
938 cifs_sb = NULL;
939 } else {
940 rc = cifs_read_super(sb);
941 if (rc) {
942 root = ERR_PTR(rc);
943 goto out_super;
944 }
945
946 sb->s_flags |= SB_ACTIVE;
947 }
948
949 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
950 if (IS_ERR(root))
951 goto out_super;
952
953 if (cifs_sb)
954 cifs_sb->root = dget(root);
955
956 cifs_dbg(FYI, "dentry root is: %p\n", root);
957 return root;
958
959 out_super:
960 deactivate_locked_super(sb);
961 return root;
962 out:
963 if (cifs_sb) {
964 if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
965 kfree(cifs_sb->prepath);
966 smb3_cleanup_fs_context(cifs_sb->ctx);
967 kfree(cifs_sb);
968 }
969 }
970 return root;
971 }
972
973
974 static ssize_t
975 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
976 {
977 ssize_t rc;
978 struct inode *inode = file_inode(iocb->ki_filp);
979
980 if (iocb->ki_flags & IOCB_DIRECT)
981 return cifs_user_readv(iocb, iter);
982
983 rc = cifs_revalidate_mapping(inode);
984 if (rc)
985 return rc;
986
987 return generic_file_read_iter(iocb, iter);
988 }
989
990 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
991 {
992 struct inode *inode = file_inode(iocb->ki_filp);
993 struct cifsInodeInfo *cinode = CIFS_I(inode);
994 ssize_t written;
995 int rc;
996
997 if (iocb->ki_filp->f_flags & O_DIRECT) {
998 written = cifs_user_writev(iocb, from);
999 if (written > 0 && CIFS_CACHE_READ(cinode)) {
1000 cifs_zap_mapping(inode);
1001 cifs_dbg(FYI,
1002 "Set no oplock for inode=%p after a write operation\n",
1003 inode);
1004 cinode->oplock = 0;
1005 }
1006 return written;
1007 }
1008
1009 written = cifs_get_writer(cinode);
1010 if (written)
1011 return written;
1012
1013 written = generic_file_write_iter(iocb, from);
1014
1015 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1016 goto out;
1017
1018 rc = filemap_fdatawrite(inode->i_mapping);
1019 if (rc)
1020 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1021 rc, inode);
1022
1023 out:
1024 cifs_put_writer(cinode);
1025 return written;
1026 }
1027
1028 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1029 {
1030 struct cifsFileInfo *cfile = file->private_data;
1031 struct cifs_tcon *tcon;
1032
1033 /*
1034 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1035 * the cached file length
1036 */
1037 if (whence != SEEK_SET && whence != SEEK_CUR) {
1038 int rc;
1039 struct inode *inode = file_inode(file);
1040
1041 /*
1042 * We need to be sure that all dirty pages are written and the
1043 * server has the newest file length.
1044 */
1045 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1046 inode->i_mapping->nrpages != 0) {
1047 rc = filemap_fdatawait(inode->i_mapping);
1048 if (rc) {
1049 mapping_set_error(inode->i_mapping, rc);
1050 return rc;
1051 }
1052 }
1053 /*
1054 * Some applications poll for the file length in this strange
1055 * way so we must seek to end on non-oplocked files by
1056 * setting the revalidate time to zero.
1057 */
1058 CIFS_I(inode)->time = 0;
1059
1060 rc = cifs_revalidate_file_attr(file);
1061 if (rc < 0)
1062 return (loff_t)rc;
1063 }
1064 if (cfile && cfile->tlink) {
1065 tcon = tlink_tcon(cfile->tlink);
1066 if (tcon->ses->server->ops->llseek)
1067 return tcon->ses->server->ops->llseek(file, tcon,
1068 offset, whence);
1069 }
1070 return generic_file_llseek(file, offset, whence);
1071 }
1072
1073 static int
1074 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
1075 {
1076 /*
1077 * Note that this is called by vfs setlease with i_lock held to
1078 * protect *lease from going away.
1079 */
1080 struct inode *inode = file_inode(file);
1081 struct cifsFileInfo *cfile = file->private_data;
1082
1083 if (!(S_ISREG(inode->i_mode)))
1084 return -EINVAL;
1085
1086 /* Check if file is oplocked if this is request for new lease */
1087 if (arg == F_UNLCK ||
1088 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1089 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1090 return generic_setlease(file, arg, lease, priv);
1091 else if (tlink_tcon(cfile->tlink)->local_lease &&
1092 !CIFS_CACHE_READ(CIFS_I(inode)))
1093 /*
1094 * If the server claims to support oplock on this file, then we
1095 * still need to check oplock even if the local_lease mount
1096 * option is set, but there are servers which do not support
1097 * oplock for which this mount option may be useful if the user
1098 * knows that the file won't be changed on the server by anyone
1099 * else.
1100 */
1101 return generic_setlease(file, arg, lease, priv);
1102 else
1103 return -EAGAIN;
1104 }
1105
1106 struct file_system_type cifs_fs_type = {
1107 .owner = THIS_MODULE,
1108 .name = "cifs",
1109 .init_fs_context = smb3_init_fs_context,
1110 .parameters = smb3_fs_parameters,
1111 .kill_sb = cifs_kill_sb,
1112 .fs_flags = FS_RENAME_DOES_D_MOVE,
1113 };
1114 MODULE_ALIAS_FS("cifs");
1115
1116 struct file_system_type smb3_fs_type = {
1117 .owner = THIS_MODULE,
1118 .name = "smb3",
1119 .init_fs_context = smb3_init_fs_context,
1120 .parameters = smb3_fs_parameters,
1121 .kill_sb = cifs_kill_sb,
1122 .fs_flags = FS_RENAME_DOES_D_MOVE,
1123 };
1124 MODULE_ALIAS_FS("smb3");
1125 MODULE_ALIAS("smb3");
1126
1127 const struct inode_operations cifs_dir_inode_ops = {
1128 .create = cifs_create,
1129 .atomic_open = cifs_atomic_open,
1130 .lookup = cifs_lookup,
1131 .getattr = cifs_getattr,
1132 .unlink = cifs_unlink,
1133 .link = cifs_hardlink,
1134 .mkdir = cifs_mkdir,
1135 .rmdir = cifs_rmdir,
1136 .rename = cifs_rename2,
1137 .permission = cifs_permission,
1138 .setattr = cifs_setattr,
1139 .symlink = cifs_symlink,
1140 .mknod = cifs_mknod,
1141 .listxattr = cifs_listxattr,
1142 };
1143
1144 const struct inode_operations cifs_file_inode_ops = {
1145 .setattr = cifs_setattr,
1146 .getattr = cifs_getattr,
1147 .permission = cifs_permission,
1148 .listxattr = cifs_listxattr,
1149 .fiemap = cifs_fiemap,
1150 };
1151
1152 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1153 struct delayed_call *done)
1154 {
1155 char *target_path;
1156
1157 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1158 if (!target_path)
1159 return ERR_PTR(-ENOMEM);
1160
1161 spin_lock(&inode->i_lock);
1162 if (likely(CIFS_I(inode)->symlink_target)) {
1163 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1164 } else {
1165 kfree(target_path);
1166 target_path = ERR_PTR(-EOPNOTSUPP);
1167 }
1168 spin_unlock(&inode->i_lock);
1169
1170 if (!IS_ERR(target_path))
1171 set_delayed_call(done, kfree_link, target_path);
1172
1173 return target_path;
1174 }
1175
1176 const struct inode_operations cifs_symlink_inode_ops = {
1177 .get_link = cifs_get_link,
1178 .permission = cifs_permission,
1179 .listxattr = cifs_listxattr,
1180 };
1181
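/*
 * Implement clone/dedup style remapping by asking the server to
 * duplicate extents between the two open files, when the dialect
 * provides a ->duplicate_extents() operation.
 */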
1182 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1183 struct file *dst_file, loff_t destoff, loff_t len,
1184 unsigned int remap_flags)
1185 {
1186 struct inode *src_inode = file_inode(src_file);
1187 struct inode *target_inode = file_inode(dst_file);
1188 struct cifsFileInfo *smb_file_src = src_file->private_data;
1189 struct cifsFileInfo *smb_file_target;
1190 struct cifs_tcon *target_tcon;
1191 unsigned int xid;
1192 int rc;
1193
1194 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1195 return -EINVAL;
1196
1197 cifs_dbg(FYI, "clone range\n");
1198
1199 xid = get_xid();
1200
1201 if (!src_file->private_data || !dst_file->private_data) {
1202 rc = -EBADF;
1203 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1204 goto out;
1205 }
1206
1207 smb_file_target = dst_file->private_data;
1208 target_tcon = tlink_tcon(smb_file_target->tlink);
1209
1210 /*
1211 * Note: the cifs case is easier than btrfs since the server is
1212 * responsible for checking proper open modes and file type, and if it
1213 * wants the server could even support copying a range where source = target
1214 */
1215 lock_two_nondirectories(target_inode, src_inode);
1216
1217 if (len == 0)
1218 len = src_inode->i_size - off;
1219
1220 cifs_dbg(FYI, "about to flush pages\n");
1221 /* should we flush first and last page first */
1222 truncate_inode_pages_range(&target_inode->i_data, destoff,
1223 PAGE_ALIGN(destoff + len)-1);
1224
1225 if (target_tcon->ses->server->ops->duplicate_extents)
1226 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1227 smb_file_src, smb_file_target, off, len, destoff);
1228 else
1229 rc = -EOPNOTSUPP;
1230
1231 /* force revalidate of size and timestamps of target file now
1232 that target is updated on the server */
1233 CIFS_I(target_inode)->time = 0;
1234 /* although unlocking in the reverse order from locking is not
1235 strictly necessary here it is a little cleaner to be consistent */
1236 unlock_two_nondirectories(src_inode, target_inode);
1237 out:
1238 free_xid(xid);
1239 return rc < 0 ? rc : len;
1240 }
1241
1242 ssize_t cifs_file_copychunk_range(unsigned int xid,
1243 struct file *src_file, loff_t off,
1244 struct file *dst_file, loff_t destoff,
1245 size_t len, unsigned int flags)
1246 {
1247 struct inode *src_inode = file_inode(src_file);
1248 struct inode *target_inode = file_inode(dst_file);
1249 struct cifsFileInfo *smb_file_src;
1250 struct cifsFileInfo *smb_file_target;
1251 struct cifs_tcon *src_tcon;
1252 struct cifs_tcon *target_tcon;
1253 ssize_t rc;
1254
1255 cifs_dbg(FYI, "copychunk range\n");
1256
1257 if (!src_file->private_data || !dst_file->private_data) {
1258 rc = -EBADF;
1259 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1260 goto out;
1261 }
1262
1263 rc = -EXDEV;
1264 smb_file_target = dst_file->private_data;
1265 smb_file_src = src_file->private_data;
1266 src_tcon = tlink_tcon(smb_file_src->tlink);
1267 target_tcon = tlink_tcon(smb_file_target->tlink);
1268
1269 if (src_tcon->ses != target_tcon->ses) {
1270 cifs_dbg(VFS, "source and target of copy not on same server\n");
1271 goto out;
1272 }
1273
1274 rc = -EOPNOTSUPP;
1275 if (!target_tcon->ses->server->ops->copychunk_range)
1276 goto out;
1277
1278 /*
1279 * Note: the cifs case is easier than btrfs since the server is
1280 * responsible for checking proper open modes and file type, and if it
1281 * wants the server could even support copying a range where source = target
1282 */
1283 lock_two_nondirectories(target_inode, src_inode);
1284
1285 cifs_dbg(FYI, "about to flush pages\n");
1286
1287 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1288 off + len - 1);
1289 if (rc)
1290 goto unlock;
1291
1292 /* should we flush first and last page first */
1293 truncate_inode_pages(&target_inode->i_data, 0);
1294
1295 rc = file_modified(dst_file);
1296 if (!rc)
1297 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1298 smb_file_src, smb_file_target, off, len, destoff);
1299
1300 file_accessed(src_file);
1301
1302 /* force revalidate of size and timestamps of target file now
1303 * that target is updated on the server
1304 */
1305 CIFS_I(target_inode)->time = 0;
1306
1307 unlock:
1308 /* although unlocking in the reverse order from locking is not
1309 * strictly necessary here it is a little cleaner to be consistent
1310 */
1311 unlock_two_nondirectories(src_inode, target_inode);
1312
1313 out:
1314 return rc;
1315 }
1316
1317 /*
1318 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1319 * is a dummy operation.
1320 */
1321 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1322 {
1323 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1324 file, datasync);
1325
1326 return 0;
1327 }
1328
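/*
 * copy_file_range(2) entry point: attempt a server-side copy via
 * cifs_file_copychunk_range() and fall back to the generic read/write
 * implementation if the server cannot do it (-EOPNOTSUPP) or the files
 * are not on the same session (-EXDEV).
 */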
1329 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1330 struct file *dst_file, loff_t destoff,
1331 size_t len, unsigned int flags)
1332 {
1333 unsigned int xid = get_xid();
1334 ssize_t rc;
1335 struct cifsFileInfo *cfile = dst_file->private_data;
1336
1337 if (cfile->swapfile) {
1338 rc = -EOPNOTSUPP;
1339 free_xid(xid);
1340 return rc;
1341 }
1342
1343 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1344 len, flags);
1345 free_xid(xid);
1346
1347 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1348 rc = generic_copy_file_range(src_file, off, dst_file,
1349 destoff, len, flags);
1350 return rc;
1351 }
1352
1353 const struct file_operations cifs_file_ops = {
1354 .read_iter = cifs_loose_read_iter,
1355 .write_iter = cifs_file_write_iter,
1356 .open = cifs_open,
1357 .release = cifs_close,
1358 .lock = cifs_lock,
1359 .flock = cifs_flock,
1360 .fsync = cifs_fsync,
1361 .flush = cifs_flush,
1362 .mmap = cifs_file_mmap,
1363 .splice_read = generic_file_splice_read,
1364 .splice_write = iter_file_splice_write,
1365 .llseek = cifs_llseek,
1366 .unlocked_ioctl = cifs_ioctl,
1367 .copy_file_range = cifs_copy_file_range,
1368 .remap_file_range = cifs_remap_file_range,
1369 .setlease = cifs_setlease,
1370 .fallocate = cifs_fallocate,
1371 };
1372
1373 const struct file_operations cifs_file_strict_ops = {
1374 .read_iter = cifs_strict_readv,
1375 .write_iter = cifs_strict_writev,
1376 .open = cifs_open,
1377 .release = cifs_close,
1378 .lock = cifs_lock,
1379 .flock = cifs_flock,
1380 .fsync = cifs_strict_fsync,
1381 .flush = cifs_flush,
1382 .mmap = cifs_file_strict_mmap,
1383 .splice_read = generic_file_splice_read,
1384 .splice_write = iter_file_splice_write,
1385 .llseek = cifs_llseek,
1386 .unlocked_ioctl = cifs_ioctl,
1387 .copy_file_range = cifs_copy_file_range,
1388 .remap_file_range = cifs_remap_file_range,
1389 .setlease = cifs_setlease,
1390 .fallocate = cifs_fallocate,
1391 };
1392
1393 const struct file_operations cifs_file_direct_ops = {
1394 .read_iter = cifs_direct_readv,
1395 .write_iter = cifs_direct_writev,
1396 .open = cifs_open,
1397 .release = cifs_close,
1398 .lock = cifs_lock,
1399 .flock = cifs_flock,
1400 .fsync = cifs_fsync,
1401 .flush = cifs_flush,
1402 .mmap = cifs_file_mmap,
1403 .splice_read = generic_file_splice_read,
1404 .splice_write = iter_file_splice_write,
1405 .unlocked_ioctl = cifs_ioctl,
1406 .copy_file_range = cifs_copy_file_range,
1407 .remap_file_range = cifs_remap_file_range,
1408 .llseek = cifs_llseek,
1409 .setlease = cifs_setlease,
1410 .fallocate = cifs_fallocate,
1411 };
1412
1413 const struct file_operations cifs_file_nobrl_ops = {
1414 .read_iter = cifs_loose_read_iter,
1415 .write_iter = cifs_file_write_iter,
1416 .open = cifs_open,
1417 .release = cifs_close,
1418 .fsync = cifs_fsync,
1419 .flush = cifs_flush,
1420 .mmap = cifs_file_mmap,
1421 .splice_read = generic_file_splice_read,
1422 .splice_write = iter_file_splice_write,
1423 .llseek = cifs_llseek,
1424 .unlocked_ioctl = cifs_ioctl,
1425 .copy_file_range = cifs_copy_file_range,
1426 .remap_file_range = cifs_remap_file_range,
1427 .setlease = cifs_setlease,
1428 .fallocate = cifs_fallocate,
1429 };
1430
1431 const struct file_operations cifs_file_strict_nobrl_ops = {
1432 .read_iter = cifs_strict_readv,
1433 .write_iter = cifs_strict_writev,
1434 .open = cifs_open,
1435 .release = cifs_close,
1436 .fsync = cifs_strict_fsync,
1437 .flush = cifs_flush,
1438 .mmap = cifs_file_strict_mmap,
1439 .splice_read = generic_file_splice_read,
1440 .splice_write = iter_file_splice_write,
1441 .llseek = cifs_llseek,
1442 .unlocked_ioctl = cifs_ioctl,
1443 .copy_file_range = cifs_copy_file_range,
1444 .remap_file_range = cifs_remap_file_range,
1445 .setlease = cifs_setlease,
1446 .fallocate = cifs_fallocate,
1447 };
1448
1449 const struct file_operations cifs_file_direct_nobrl_ops = {
1450 .read_iter = cifs_direct_readv,
1451 .write_iter = cifs_direct_writev,
1452 .open = cifs_open,
1453 .release = cifs_close,
1454 .fsync = cifs_fsync,
1455 .flush = cifs_flush,
1456 .mmap = cifs_file_mmap,
1457 .splice_read = generic_file_splice_read,
1458 .splice_write = iter_file_splice_write,
1459 .unlocked_ioctl = cifs_ioctl,
1460 .copy_file_range = cifs_copy_file_range,
1461 .remap_file_range = cifs_remap_file_range,
1462 .llseek = cifs_llseek,
1463 .setlease = cifs_setlease,
1464 .fallocate = cifs_fallocate,
1465 };
1466
1467 const struct file_operations cifs_dir_ops = {
1468 .iterate_shared = cifs_readdir,
1469 .release = cifs_closedir,
1470 .read = generic_read_dir,
1471 .unlocked_ioctl = cifs_ioctl,
1472 .copy_file_range = cifs_copy_file_range,
1473 .remap_file_range = cifs_remap_file_range,
1474 .llseek = generic_file_llseek,
1475 .fsync = cifs_dir_fsync,
1476 };
1477
1478 static void
1479 cifs_init_once(void *inode)
1480 {
1481 struct cifsInodeInfo *cifsi = inode;
1482
1483 inode_init_once(&cifsi->netfs.inode);
1484 init_rwsem(&cifsi->lock_sem);
1485 }
1486
1487 static int __init
1488 cifs_init_inodecache(void)
1489 {
1490 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1491 sizeof(struct cifsInodeInfo),
1492 0, (SLAB_RECLAIM_ACCOUNT|
1493 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1494 cifs_init_once);
1495 if (cifs_inode_cachep == NULL)
1496 return -ENOMEM;
1497
1498 return 0;
1499 }
1500
1501 static void
1502 cifs_destroy_inodecache(void)
1503 {
1504 /*
1505 * Make sure all delayed rcu free inodes are flushed before we
1506 * destroy cache.
1507 */
1508 rcu_barrier();
1509 kmem_cache_destroy(cifs_inode_cachep);
1510 }
1511
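/*
 * Set up the slab caches and mempools used for SMB request buffers: a
 * pool of large buffers (CIFSMaxBufSize plus the largest header) and a
 * pool of small buffers for the many requests that fit within
 * MAX_CIFS_SMALL_BUFFER_SIZE.
 */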
1512 static int
1513 cifs_init_request_bufs(void)
1514 {
1515 /*
1516 * SMB2 maximum header size is bigger than CIFS one - no problems to
1517 * allocate some more bytes for CIFS.
1518 */
1519 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1520
1521 if (CIFSMaxBufSize < 8192) {
1522 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1523 Unicode path name has to fit in any SMB/CIFS path based frames */
1524 CIFSMaxBufSize = 8192;
1525 } else if (CIFSMaxBufSize > 1024*127) {
1526 CIFSMaxBufSize = 1024 * 127;
1527 } else {
1528 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1529 }
1530 /*
1531 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1532 CIFSMaxBufSize, CIFSMaxBufSize);
1533 */
1534 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1535 CIFSMaxBufSize + max_hdr_size, 0,
1536 SLAB_HWCACHE_ALIGN, 0,
1537 CIFSMaxBufSize + max_hdr_size,
1538 NULL);
1539 if (cifs_req_cachep == NULL)
1540 return -ENOMEM;
1541
1542 if (cifs_min_rcv < 1)
1543 cifs_min_rcv = 1;
1544 else if (cifs_min_rcv > 64) {
1545 cifs_min_rcv = 64;
1546 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1547 }
1548
1549 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1550 cifs_req_cachep);
1551
1552 if (cifs_req_poolp == NULL) {
1553 kmem_cache_destroy(cifs_req_cachep);
1554 return -ENOMEM;
1555 }
1556 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1557 almost all handle based requests (but not write response, nor is it
1558 sufficient for path based requests). A smaller size would have
1559 been more efficient (compacting multiple slab items on one 4k page)
1560 for the case in which debug was on, but this larger size allows
1561 more SMBs to use small buffer alloc and is still much more
1562 efficient to alloc 1 per page off the slab compared to 17K (5page)
1563 alloc of large cifs buffers even when page debugging is on */
1564 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1565 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1566 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1567 if (cifs_sm_req_cachep == NULL) {
1568 mempool_destroy(cifs_req_poolp);
1569 kmem_cache_destroy(cifs_req_cachep);
1570 return -ENOMEM;
1571 }
1572
1573 if (cifs_min_small < 2)
1574 cifs_min_small = 2;
1575 else if (cifs_min_small > 256) {
1576 cifs_min_small = 256;
1577 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1578 }
1579
1580 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1581 cifs_sm_req_cachep);
1582
1583 if (cifs_sm_req_poolp == NULL) {
1584 mempool_destroy(cifs_req_poolp);
1585 kmem_cache_destroy(cifs_req_cachep);
1586 kmem_cache_destroy(cifs_sm_req_cachep);
1587 return -ENOMEM;
1588 }
1589
1590 return 0;
1591 }
1592
1593 static void
1594 cifs_destroy_request_bufs(void)
1595 {
1596 mempool_destroy(cifs_req_poolp);
1597 kmem_cache_destroy(cifs_req_cachep);
1598 mempool_destroy(cifs_sm_req_poolp);
1599 kmem_cache_destroy(cifs_sm_req_cachep);
1600 }
1601
1602 static int init_mids(void)
1603 {
1604 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1605 sizeof(struct mid_q_entry), 0,
1606 SLAB_HWCACHE_ALIGN, NULL);
1607 if (cifs_mid_cachep == NULL)
1608 return -ENOMEM;
1609
1610 /* 3 is a reasonable minimum number of simultaneous operations */
1611 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1612 if (cifs_mid_poolp == NULL) {
1613 kmem_cache_destroy(cifs_mid_cachep);
1614 return -ENOMEM;
1615 }
1616
1617 return 0;
1618 }
1619
1620 static void destroy_mids(void)
1621 {
1622 mempool_destroy(cifs_mid_poolp);
1623 kmem_cache_destroy(cifs_mid_cachep);
1624 }
1625
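/*
 * Module initialization: reset the global counters, create the
 * workqueues, inode/mid/request caches and mempools, initialize the
 * DFS, spnego and swn upcalls (when configured) and the idmap key type,
 * and finally register the "cifs" and "smb3" filesystem types.  Errors
 * unwind in reverse order.
 */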
1626 static int __init
1627 init_cifs(void)
1628 {
1629 int rc = 0;
1630 cifs_proc_init();
1631 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1632 /*
1633 * Initialize Global counters
1634 */
1635 atomic_set(&sesInfoAllocCount, 0);
1636 atomic_set(&tconInfoAllocCount, 0);
1637 atomic_set(&tcpSesNextId, 0);
1638 atomic_set(&tcpSesAllocCount, 0);
1639 atomic_set(&tcpSesReconnectCount, 0);
1640 atomic_set(&tconInfoReconnectCount, 0);
1641
1642 atomic_set(&buf_alloc_count, 0);
1643 atomic_set(&small_buf_alloc_count, 0);
1644 #ifdef CONFIG_CIFS_STATS2
1645 atomic_set(&total_buf_alloc_count, 0);
1646 atomic_set(&total_small_buf_alloc_count, 0);
1647 if (slow_rsp_threshold < 1)
1648 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1649 else if (slow_rsp_threshold > 32767)
1650 cifs_dbg(VFS,
1651 "slow response threshold set higher than recommended (0 to 32767)\n");
1652 #endif /* CONFIG_CIFS_STATS2 */
1653
1654 atomic_set(&mid_count, 0);
1655 GlobalCurrentXid = 0;
1656 GlobalTotalActiveXid = 0;
1657 GlobalMaxActiveXid = 0;
1658 spin_lock_init(&cifs_tcp_ses_lock);
1659 spin_lock_init(&GlobalMid_Lock);
1660
1661 cifs_lock_secret = get_random_u32();
1662
1663 if (cifs_max_pending < 2) {
1664 cifs_max_pending = 2;
1665 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1666 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1667 cifs_max_pending = CIFS_MAX_REQ;
1668 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1669 CIFS_MAX_REQ);
1670 }
1671
1672 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1673 if (!cifsiod_wq) {
1674 rc = -ENOMEM;
1675 goto out_clean_proc;
1676 }
1677
1678 /*
1679 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1680 * so that we don't launch too many worker threads but
1681 * Documentation/core-api/workqueue.rst recommends setting it to 0
1682 */
1683
1684 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1685 decrypt_wq = alloc_workqueue("smb3decryptd",
1686 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1687 if (!decrypt_wq) {
1688 rc = -ENOMEM;
1689 goto out_destroy_cifsiod_wq;
1690 }
1691
1692 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1693 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1694 if (!fileinfo_put_wq) {
1695 rc = -ENOMEM;
1696 goto out_destroy_decrypt_wq;
1697 }
1698
1699 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1700 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1701 if (!cifsoplockd_wq) {
1702 rc = -ENOMEM;
1703 goto out_destroy_fileinfo_put_wq;
1704 }
1705
1706 deferredclose_wq = alloc_workqueue("deferredclose",
1707 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1708 if (!deferredclose_wq) {
1709 rc = -ENOMEM;
1710 goto out_destroy_cifsoplockd_wq;
1711 }
1712
1713 rc = cifs_init_inodecache();
1714 if (rc)
1715 goto out_destroy_deferredclose_wq;
1716
1717 rc = init_mids();
1718 if (rc)
1719 goto out_destroy_inodecache;
1720
1721 rc = cifs_init_request_bufs();
1722 if (rc)
1723 goto out_destroy_mids;
1724
1725 #ifdef CONFIG_CIFS_DFS_UPCALL
1726 rc = dfs_cache_init();
1727 if (rc)
1728 goto out_destroy_request_bufs;
1729 #endif /* CONFIG_CIFS_DFS_UPCALL */
1730 #ifdef CONFIG_CIFS_UPCALL
1731 rc = init_cifs_spnego();
1732 if (rc)
1733 goto out_destroy_dfs_cache;
1734 #endif /* CONFIG_CIFS_UPCALL */
1735 #ifdef CONFIG_CIFS_SWN_UPCALL
1736 rc = cifs_genl_init();
1737 if (rc)
1738 goto out_register_key_type;
1739 #endif /* CONFIG_CIFS_SWN_UPCALL */
1740
1741 rc = init_cifs_idmap();
1742 if (rc)
1743 goto out_cifs_swn_init;
1744
1745 rc = register_filesystem(&cifs_fs_type);
1746 if (rc)
1747 goto out_init_cifs_idmap;
1748
1749 rc = register_filesystem(&smb3_fs_type);
1750 if (rc) {
1751 unregister_filesystem(&cifs_fs_type);
1752 goto out_init_cifs_idmap;
1753 }
1754
1755 return 0;
1756
1757 out_init_cifs_idmap:
1758 exit_cifs_idmap();
1759 out_cifs_swn_init:
1760 #ifdef CONFIG_CIFS_SWN_UPCALL
1761 cifs_genl_exit();
1762 out_register_key_type:
1763 #endif
1764 #ifdef CONFIG_CIFS_UPCALL
1765 exit_cifs_spnego();
1766 out_destroy_dfs_cache:
1767 #endif
1768 #ifdef CONFIG_CIFS_DFS_UPCALL
1769 dfs_cache_destroy();
1770 out_destroy_request_bufs:
1771 #endif
1772 cifs_destroy_request_bufs();
1773 out_destroy_mids:
1774 destroy_mids();
1775 out_destroy_inodecache:
1776 cifs_destroy_inodecache();
1777 out_destroy_deferredclose_wq:
1778 destroy_workqueue(deferredclose_wq);
1779 out_destroy_cifsoplockd_wq:
1780 destroy_workqueue(cifsoplockd_wq);
1781 out_destroy_fileinfo_put_wq:
1782 destroy_workqueue(fileinfo_put_wq);
1783 out_destroy_decrypt_wq:
1784 destroy_workqueue(decrypt_wq);
1785 out_destroy_cifsiod_wq:
1786 destroy_workqueue(cifsiod_wq);
1787 out_clean_proc:
1788 cifs_proc_clean();
1789 return rc;
1790 }
1791
1792 static void __exit
1793 exit_cifs(void)
1794 {
1795 cifs_dbg(NOISY, "exit_smb3\n");
1796 unregister_filesystem(&cifs_fs_type);
1797 unregister_filesystem(&smb3_fs_type);
1798 cifs_dfs_release_automount_timer();
1799 exit_cifs_idmap();
1800 #ifdef CONFIG_CIFS_SWN_UPCALL
1801 cifs_genl_exit();
1802 #endif
1803 #ifdef CONFIG_CIFS_UPCALL
1804 exit_cifs_spnego();
1805 #endif
1806 #ifdef CONFIG_CIFS_DFS_UPCALL
1807 dfs_cache_destroy();
1808 #endif
1809 cifs_destroy_request_bufs();
1810 destroy_mids();
1811 cifs_destroy_inodecache();
1812 destroy_workqueue(deferredclose_wq);
1813 destroy_workqueue(cifsoplockd_wq);
1814 destroy_workqueue(decrypt_wq);
1815 destroy_workqueue(fileinfo_put_wq);
1816 destroy_workqueue(cifsiod_wq);
1817 cifs_proc_clean();
1818 }
1819
1820 MODULE_AUTHOR("Steve French");
1821 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1822 MODULE_DESCRIPTION
1823 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1824 "also older servers complying with the SNIA CIFS Specification)");
1825 MODULE_VERSION(CIFS_VERSION);
1826 MODULE_SOFTDEP("ecb");
1827 MODULE_SOFTDEP("hmac");
1828 MODULE_SOFTDEP("md5");
1829 MODULE_SOFTDEP("nls");
1830 MODULE_SOFTDEP("aes");
1831 MODULE_SOFTDEP("cmac");
1832 MODULE_SOFTDEP("sha256");
1833 MODULE_SOFTDEP("sha512");
1834 MODULE_SOFTDEP("aead2");
1835 MODULE_SOFTDEP("ccm");
1836 MODULE_SOFTDEP("gcm");
1837 module_init(init_cifs)
1838 module_exit(exit_cifs)
1839