// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
#include "cifsfs.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
#endif
#include "fs_context.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

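/*
 * Allocate and minimally initialize a new SMB session structure. The caller
 * owns the initial reference (ses_count starts at 1). Returns NULL if the
 * allocation fails.
 */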
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->ses_status = SES_NEW;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
		INIT_LIST_HEAD(&ret_buf->iface_list);
		spin_lock_init(&ret_buf->chan_lock);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	struct cifs_server_iface *iface = NULL, *niface = NULL;

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kfree_sensitive(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree_sensitive(buf_to_free->auth_key.response);
	spin_lock(&buf_to_free->iface_lock);
	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
				 iface_head)
		kref_put(&iface->refcount, release_iface);
	spin_unlock(&buf_to_free->iface_lock);
	kfree_sensitive(buf_to_free);
}

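/*
 * Allocate and initialize a new tree connection, including the fid used to
 * cache the root directory handle. Returns NULL if either allocation fails.
 */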
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
	if (!ret_buf->crfid.fid) {
		kfree(ret_buf);
		return NULL;
	}
	INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries);
	mutex_init(&ret_buf->crfid.dirents.de_mutex);

	atomic_inc(&tconInfoAllocCount);
	ret_buf->status = TID_NEW;
	++ret_buf->tc_count;
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	mutex_init(&ret_buf->crfid.fid_mutex);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);

	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kfree_sensitive(buf_to_free->password);
	kfree(buf_to_free->crfid.fid);
	kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * The SMB2 header is larger than the CIFS (SMB1) one, so it does no
	 * harm to clear a few extra bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_hdr);

	/*
	 * We could use the negotiated size instead of max_msgsize, but it may
	 * be more efficient to always allocate the same size, albeit slightly
	 * larger than necessary; maxbuffersize defaults to this and cannot be
	 * bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/*
	 * We could use the negotiated size instead of max_msgsize, but it may
	 * be more efficient to always allocate the same size, albeit slightly
	 * larger than necessary; maxbuffersize defaults to this and cannot be
	 * bigger.
	 */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

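/*
 * Return a response buffer to the pool it was allocated from, based on
 * @resp_buftype; other buffer types need no action.
 */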
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

/* NB: MID can not be set if treeCon not passed in, in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units  */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /*  RFC 1001 length field does not count */  +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			if (treeCon->ses->server)
				buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags  |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

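/*
 * Basic sanity checks on a received SMB1 frame: verify the 0xFF 'S' 'M' 'B'
 * protocol signature and that the frame is a response (or a LOCKING_ANDX
 * request, the only request a server may legitimately send us). Returns 0
 * if the frame looks valid, 1 otherwise.
 */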
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

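/*
 * Validate a received SMB1 frame: check the header and make sure the
 * RFC1001 length, the number of bytes actually read from the socket and
 * the length calculated from the word count/byte count fields are all
 * consistent. Returns 0 if the frame looks sane, -EIO otherwise.
 */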
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero);
				 * in particular this has been seen on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

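/*
 * Check whether a server-initiated frame is an oplock break (or a dnotify
 * response) and, if it is, queue the oplock break handler for the matching
 * open file. Returns true if the frame was recognized and handled here,
 * false if it should be treated as an ordinary response.
 */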
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "Invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging an error on an invalid handle in an oplock
		   break - a harmless race between a close request and the
		   oplock break response is expected from time to time when
		   writing out large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "Invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
			 tcon ? tcon->treeName : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
	}
}

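/*
 * Translate the SMB1 oplock level granted by the server into the
 * corresponding client-side caching flags on the inode.
 */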
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

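/*
 * Drop the writer count taken in cifs_get_writer() and, when the last
 * writer goes away, wake up anyone waiting for pending writers to finish
 * (e.g. the oplock break handler).
 */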
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 * @cfile: The file to break the oplock on
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

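/*
 * Return true if the current task may use the backup intent credentials
 * configured via the backupuid/backupgid mount options.
 */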
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->ctx->backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

/*
 * Critical section which runs after acquiring deferred_lock.
 * As there is no reference count on cifs_deferred_close, pdclose
 * should not be used outside deferred_lock.
 */
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
	struct cifs_deferred_close *dclose;

	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
		if ((dclose->netfid == cfile->fid.netfid) &&
			(dclose->persistent_fid == cfile->fid.persistent_fid) &&
			(dclose->volatile_fid == cfile->fid.volatile_fid)) {
			*pdclose = dclose;
			return true;
		}
	}
	return false;
}

/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
	bool is_deferred = false;
	struct cifs_deferred_close *pdclose;

	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
	if (is_deferred) {
		kfree(dclose);
		return;
	}

	dclose->tlink = cfile->tlink;
	dclose->netfid = cfile->fid.netfid;
	dclose->persistent_fid = cfile->fid.persistent_fid;
	dclose->volatile_fid = cfile->fid.volatile_fid;
	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}

/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	if (!is_deferred)
		return;
	list_del(&dclose->dlist);
	kfree(dclose);
}

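/*
 * Cancel any pending deferred closes for open files on this inode and drop
 * the file handle references that were being kept alive by them.
 */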
void
cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *cfile = NULL;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	if (cifs_inode == NULL)
		return;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				cifs_del_deferred_close(cfile);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *cfile;
	struct list_head *tmp;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		cfile = list_entry(tmp, struct cifsFileInfo, tlist);
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				cifs_del_deferred_close(cfile);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void
cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
{
	struct cifsFileInfo *cfile;
	struct list_head *tmp;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;
	void *page;
	const char *full_path;

	INIT_LIST_HEAD(&file_head);
	page = alloc_dentry_path();
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		cfile = list_entry(tmp, struct cifsFileInfo, tlist);
		full_path = build_path_from_dentry(cfile->dentry, page);
		if (strstr(full_path, path)) {
			if (delayed_work_pending(&cfile->deferred)) {
				if (cancel_delayed_work(&cfile->deferred)) {
					cifs_del_deferred_close(cfile);

					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
					if (tmp_list == NULL)
						break;
					tmp_list->cfile = cfile;
					list_add_tail(&tmp_list->list, &file_head);
				}
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
	free_dentry_path(page);
}

/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
						GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	/*
	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
	 * to false so that we know when we have to unreference pages within
	 * cifs_aio_ctx_release()
	 */
	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}

void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);

	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
	 * which means that iov_iter_get_pages() was a success and thus that
	 * we have taken reference on pages.
	 */
	if (ctx->bv) {
		unsigned i;

		for (i = 0; i < ctx->npages; i++) {
			if (ctx->should_dirty)
				set_page_dirty(ctx->bv[i].bv_page);
			put_page(ctx->bv[i].bv_page);
		}
		kvfree(ctx->bv);
	}

	kfree(ctx);
}

#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

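/*
 * Set up the iov_iter embedded in @ctx for an async read or write. For kvec
 * iterators the iterator is simply copied; otherwise the user pages are
 * pinned and described by a freshly built bio_vec array. Returns 0 on
 * success or a negative errno.
 */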
int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iov_iter_is_kvec(iter)) {
		memcpy(&ctx->iter, iter, sizeof(*iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(*bv)));
		if (!bv)
			return -ENOMEM;
	}

	if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(*pages)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
	return 0;
}

/**
 * cifs_alloc_hash - allocate hash and hash context together
 * @name: The name of the crypto hash algo
 * @shash: Where to put the pointer to the hash algo
 * @sdesc: Where to put the pointer to the hash descriptor
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
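/*
 * Illustrative usage (sketch only, not taken from a caller in this file):
 *
 *	struct crypto_shash *sha512 = NULL;
 *	struct sdesc *sdesc_sha512 = NULL;
 *	int rc = cifs_alloc_hash("sha512", &sha512, &sdesc_sha512);
 *
 *	if (!rc) {
 *		crypto_shash_digest(&sdesc_sha512->shash, data, len, hash);
 *		cifs_free_hash(&sha512, &sdesc_sha512);
 *	}
 */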
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "Could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
 * @shash: Where to find the pointer to the hash algo
 * @sdesc: Where to find the pointer to the hash descriptor
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
	kfree(*sdesc);
	*sdesc = NULL;
	if (*shash)
		crypto_free_shash(*shash);
	*shash = NULL;
}

/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * @rqst: The request descriptor
 * @page: The index of the page to query
 * @len: Where to store the length for this page
 * @offset: Where to store the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
				unsigned int *len, unsigned int *offset)
{
	*len = rqst->rq_pagesz;
	*offset = (page == 0) ? rqst->rq_offset : 0;

	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
		*len = rqst->rq_tailsz;
	else if (page == 0)
		*len = rqst->rq_pagesz - rqst->rq_offset;
}

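/*
 * Given a UNC path such as \\server\share, return a pointer to the start of
 * the hostname and its length. No copy is made and nothing is allocated.
 */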
void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;
}

/**
 * copy_path_name - copy src path to dst, possibly truncating
 * @dst: The destination buffer
 * @src: The source name
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}

struct super_cb_data {
	void *data;
	struct super_block *sb;
};

static void tcp_super_cb(struct super_block *sb, void *arg)
{
	struct super_cb_data *sd = arg;
	struct TCP_Server_Info *server = sd->data;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;

	if (sd->sb)
		return;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);
	if (tcon->ses->server == server)
		sd->sb = sb;
}

static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
					    void *data)
{
	struct super_cb_data sd = {
		.data = data,
		.sb = NULL,
	};
	struct file_system_type **fs_type = (struct file_system_type *[]) {
		&cifs_fs_type, &smb3_fs_type, NULL,
	};

	for (; *fs_type; fs_type++) {
		iterate_supers_type(*fs_type, f, &sd);
		if (sd.sb) {
			/*
			 * Grab an active reference in order to prevent automounts (DFS links)
			 * from expiring and then freeing up our cifs superblock pointer while
			 * we're doing failover.
			 */
			cifs_sb_active(sd.sb);
			return sd.sb;
		}
	}
	return ERR_PTR(-EINVAL);
}

static void __cifs_put_super(struct super_block *sb)
{
	if (!IS_ERR_OR_NULL(sb))
		cifs_sb_deactive(sb);
}

struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
{
	return __cifs_get_super(tcp_super_cb, server);
}

void cifs_put_tcp_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}

#ifdef CONFIG_CIFS_DFS_UPCALL
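/*
 * Resolve the hostname of a DFS target share ("\\server\share") via the DNS
 * upcall and check whether its IP address matches the address this TCP
 * server is connected to; the outcome is stored in @result.
 */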
int match_target_ip(struct TCP_Server_Info *server,
		    const char *share, size_t share_len,
		    bool *result)
{
	int rc;
	char *target, *tip = NULL;
	struct sockaddr tipaddr;

	*result = false;

	target = kzalloc(share_len + 3, GFP_KERNEL);
	if (!target) {
		rc = -ENOMEM;
		goto out;
	}

	scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);

	rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
	if (rc < 0)
		goto out;

	cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);

	if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
		cifs_dbg(VFS, "%s: failed to convert target ip address\n",
			 __func__);
		rc = -EINVAL;
		goto out;
	}

	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
				    &tipaddr);
	cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
	rc = 0;

out:
	kfree(target);
	kfree(tip);

	return rc;
}

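/*
 * Replace the prefix path stored in the superblock info with @prefix
 * (converted to the mount's directory separator) and flag the mount as
 * using a prefix path.
 */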
int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
{
	kfree(cifs_sb->prepath);

	if (prefix && *prefix) {
		cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
		if (!cifs_sb->prepath)
			return -ENOMEM;

		convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	} else
		cifs_sb->prepath = NULL;

	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
	return 0;
}

/*
 * cifs_dfs_query_info_nonascii_quirk - handle weird Windows SMB server
 * behaviour. Such servers respond with STATUS_OBJECT_NAME_INVALID to an
 * SMB2 QUERY_INFO request for a "\<server>\<dfsname>\<linkpath>" DFS
 * reference, where <dfsname> contains non-ASCII unicode symbols.
 *
 * Check whether the path is such a DFS reference and, if so, emulate
 * -EREMOTE so the caller follows the referral.
 */
int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
				       struct cifs_tcon *tcon,
				       struct cifs_sb_info *cifs_sb,
				       const char *linkpath)
{
	char *treename, *dfspath, sep;
	int treenamelen, linkpathlen, rc;

	treename = tcon->treeName;
	/* MS-DFSC: All paths in REQ_GET_DFS_REFERRAL and RESP_GET_DFS_REFERRAL
	 * messages MUST be encoded with exactly one leading backslash, not two
	 * leading backslashes.
	 */
	sep = CIFS_DIR_SEP(cifs_sb);
	if (treename[0] == sep && treename[1] == sep)
		treename++;
	linkpathlen = strlen(linkpath);
	treenamelen = strnlen(treename, MAX_TREE_SIZE + 1);
	dfspath = kzalloc(treenamelen + linkpathlen + 1, GFP_KERNEL);
	if (!dfspath)
		return -ENOMEM;
	if (treenamelen)
		memcpy(dfspath, treename, treenamelen);
	memcpy(dfspath + treenamelen, linkpath, linkpathlen);
	rc = dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls,
			    cifs_remap(cifs_sb), dfspath, NULL, NULL);
	if (rc == 0) {
		cifs_dbg(FYI, "DFS ref '%s' is found, emulate -EREMOTE\n",
			 dfspath);
		rc = -EREMOTE;
	} else {
		cifs_dbg(FYI, "%s: dfs_cache_find returned %d\n", __func__, rc);
	}
	kfree(dfspath);
	return rc;
}
#endif