/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

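/*
 * Map the O_ACCMODE bits of the VFS open flags to the SMB/CIFS
 * desired-access mask sent in the NT open request. GENERIC_ALL is
 * deliberately avoided (see the comment below); the fallback mask
 * grants explicit read/write/attribute/EA access instead.
 */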
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause an unnecessary access-denied error on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

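/*
 * Translate VFS open flags into the SMB_O_* flags used by the CIFS
 * POSIX extensions open call. Note that O_DSYNC is conservatively
 * widened to SMB_O_SYNC since the protocol has no separate bit for it.
 */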
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

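/*
 * Map create-related open flags (O_CREAT, O_EXCL, O_TRUNC) to the NT
 * create disposition; see the flag mapping table in cifs_nt_open().
 */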
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

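/*
 * Open a file via the CIFS POSIX extensions (a single round trip that
 * both opens the file and returns its UNIX attributes). On success the
 * returned FILE_UNIX_BASIC_INFO is used to instantiate or refresh the
 * inode, unless the server reports an unknown type, in which case the
 * caller falls back to a separate QPathInfo.
 */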
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

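/*
 * Open a file with a traditional NT-style SMB open. Converts the VFS
 * open flags to a desired-access mask and a create disposition, issues
 * the open, and then refreshes the inode metadata from the server.
 */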
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);
/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}

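/*
 * Wrap a freshly opened server file handle in a cifsFileInfo, take the
 * needed dentry/superblock/tlink references, and link it into the
 * per-tcon and per-inode open file lists. Readable instances go to the
 * front of the inode list so readers find them first.
 */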
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	cifs_sb_active(inode->i_sb);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if this is a readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need to invalidate the mapping on
		   the last close because it may cause an error when we open
		   this file again and get at least a level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		if (li->netfid != cifs_file->netfid)
			continue;
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

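/*
 * Open callback for the cifs VFS. Tries a POSIX-extensions open first
 * when the server supports it, then falls back to the NT open path.
 * On success the open handle is wrapped in a cifsFileInfo hung off
 * file->private_data.
 */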
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

/* BB list all locks open on this file and relock */

	return rc;
}

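/*
 * Re-establish a server file handle after a session reconnect (or after
 * the handle was otherwise invalidated). When can_flush is true, cached
 * dirty data is written back and the inode metadata is refreshed from
 * the server once the new handle is in place.
 */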
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab the rename sem here because various ops, including those
   that already have the rename sem, can end up causing writepage to get
   called, and if the server was down that means we end up here, and we
   can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to the server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

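/*
 * Allocate and initialize a cifsLockInfo describing one byte-range lock,
 * tagged with the owning file handle and the caller's tgid.
 */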
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->netfid = netfid;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

static bool
__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
			__u64 length, __u8 type, __u16 netfid,
			struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li, *tmp;

	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
			 ((netfid == li->netfid && current->tgid == li->pid) ||
			  type == li->type))
			continue;
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}

static bool
cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
			struct cifsLockInfo **conf_lock)
{
	return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
					 lock->type, lock->netfid, conf_lock);
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we
 * can cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
	       __u8 type, __u16 netfid, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
					  &conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

static void
cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
{
	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&lock->llist, &cinode->llist);
	mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cinode->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	mutex_unlock(&cinode->lock_mutex);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

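/*
 * Push all locally cached byte-range locks to the server as mandatory
 * (LockingAndX) locks, batching as many ranges per request as fit in
 * the negotiated buffer. Exclusive and shared locks are sent in
 * separate passes since each request carries a single lock type.
 */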
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	int xid, rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = GetXid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
						       li->type, 0, num, buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	FreeXid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = GetXid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_mutex that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		struct file_lock tmp_lock;
		int stored_rc;

		tmp_lock.fl_start = lck->offset;
		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     0, lck->length, &tmp_lock,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return cifs_push_mandatory_locks(cfile);
}

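/*
 * Decode a struct file_lock into the pieces the CIFS lock paths need:
 * the LockingAndX lock type, whether this is a lock or an unlock
 * request, and whether the caller is willing to block.
 */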
static void
cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
		bool *wait_flag)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = LOCKING_ANDX_LARGE_FILES;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}

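/*
 * Handle an F_GETLK request. For posix locks this is answered from the
 * local lock table (or from the server when the local table can't be
 * trusted); for mandatory locks we probe the range on the server with a
 * lock/unlock pair and report what would conflict.
 */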
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      1 /* get */, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
			    flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1, type, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type, 0, 0);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error %d unlocking previously locked "
				   "range during test of lock", rc);
		return 0;
	}

	if (type & LOCKING_ANDX_SHARED_LOCK) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1,
			 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type | LOCKING_ANDX_SHARED_LOCK,
				 0, 0);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error %d unlocking previously locked "
				  "range during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}

static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

static void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

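/*
 * Remove all cached locks that fall inside the unlock range. Ranges
 * that have already been pushed to the server are batched into unlock
 * requests; if a request fails, the affected records are restored to
 * the inode list so the cached lock state stays consistent.
 */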
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (cfile->netfid != li->netfid)
				continue;
			if (types[i] != li->type)
				continue;
			if (!cinode->can_cache_brlcks) {
				cur->Pid = cpu_to_le16(li->pid);
				cur->LengthLow = cpu_to_le32((u32)li->length);
				cur->LengthHigh =
					cpu_to_le32((u32)(li->length>>32));
				cur->OffsetLow = cpu_to_le32((u32)li->offset);
				cur->OffsetHigh =
					cpu_to_le32((u32)(li->offset>>32));
				/*
				 * We need to save a lock here to let us add
				 * it again to the inode list if the unlock
				 * range request fails on the server.
				 */
				list_move(&li->llist, &tmp_llist);
				if (++num == max_num) {
					stored_rc = cifs_lockv(xid, tcon,
							       cfile->netfid,
							       li->type, num,
							       0, buf);
					if (stored_rc) {
						/*
						 * We failed on the unlock range
						 * request - add all locks from
						 * the tmp list to the head of
						 * the inode list.
						 */
						cifs_move_llist(&tmp_llist,
								&cinode->llist);
						rc = stored_rc;
					} else
						/*
						 * The unlock range request
						 * succeeded - free the tmp
						 * list.
						 */
						cifs_free_llist(&tmp_llist);
					cur = buf;
					num = 0;
				} else
					cur++;
			} else {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the inode list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
			}
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist, &cinode->llist);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	mutex_unlock(&cinode->lock_mutex);
	kfree(buf);
	return rc;
}

static int
cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      0 /* set */, length, flock,
				      posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type, netfid);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cinode, lock, wait_flag);
		if (rc < 0)
			kfree(lock);
		if (rc <= 0)
			goto out;

		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
				 flock->fl_start, 0, 1, type, wait_flag, 0);
		if (rc) {
			kfree(lock);
			goto out;
		}

		cifs_lock_add(cinode, lock);
	} else if (unlock)
		rc = cifs_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u8 type;

	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);
	netfid = cfile->netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		FreeXid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	FreeXid(xid);
	return rc;
}

/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		      unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

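/*
 * Write data to the server through an open file handle, looping until
 * the full request has been sent (each request is capped at the
 * negotiated wsize). Advances *poffset, updates the cached server EOF,
 * and reopens the handle first if it has been invalidated.
 */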
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *poffset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}

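/*
 * Find an open file instance on this inode with a valid handle that is
 * readable, optionally restricted to handles owned by the current fsuid
 * (used on multiuser mounts). Takes a reference on the returned handle.
 */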
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of an oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}

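/*
 * Write out the [from, to) portion of a single page, using a writable
 * file handle found on the inode. Returns 0 on success or a negative
 * error code; writes past the current inode size are clipped.
 */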
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

/*
 * Marshal up the iov array, reserving the first one for the header. Also,
 * set wdata->bytes.
 */
static void
cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
{
	int i;
	struct inode *inode = wdata->cfile->dentry->d_inode;
	loff_t size = i_size_read(inode);

	/* marshal up the pages into iov array */
	wdata->bytes = 0;
	for (i = 0; i < wdata->nr_pages; i++) {
		iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
					(loff_t)PAGE_CACHE_SIZE);
		iov[i + 1].iov_base = kmap(wdata->pages[i]);
		wdata->bytes += iov[i + 1].iov_len;
	}
}

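/*
 * Writeback implementation for the cifs address space. Gathers runs of
 * contiguous dirty pages (up to wsize worth per request) into a
 * cifs_writedata for submission, falling back to generic_writepages()
 * when wsize is smaller than the page size.
 */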
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;
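		/*
		 * Worked example, assuming wsize == 57344 and 4 KiB pages:
		 * (57344 / 4096) - 1 == 13, so tofind is at most 14 pages,
		 * i.e. exactly one wsize-sized write per pass through this
		 * loop.
		 */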

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->marshal_iov = cifs_writepages_marshal_iov;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

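/*
 * Note: the loop above turns each run of contiguous dirty pages into a
 * single cifs_writedata and hands it to cifs_async_writev(), so a
 * background flush can have several wsize-sized writes in flight at
 * once; only WB_SYNC_ALL writeback retries -EAGAIN synchronously here.
 */
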
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than calling
		   cifs_partialpagewrite directly, since here the file
		   handle is already known and we might as well use it */
		/* BB check whether anything else is missing out of ppw,
		   such as updating the last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As the file closes, flush all cached write data for this inode,
 * checking for write-behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

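/*
 * Worked example, assuming wsize == 57344, len == 200000 and 4 KiB
 * pages: clen == 57344 and num_pages == DIV_ROUND_UP(57344, 4096) == 14;
 * the caller loops, shrinking len by cur_len on each pass, until the
 * whole request has been consumed.
 */
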
static void
cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
{
	int i;
	size_t bytes = wdata->bytes;

	/* marshal up the pages into iov array */
	for (i = 0; i < wdata->nr_pages; i++) {
		iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
		iov[i + 1].iov_base = kmap(wdata->pages[i]);
		bytes -= iov[i + 1].iov_len;
	}
}

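/*
 * Note: unlike cifs_writepages_marshal_iov(), wdata->bytes is already
 * known here (the caller sets it to the amount copied from userspace),
 * so this variant only splits that byte count across the kmap()ed pages
 * instead of consulting i_size.
 */
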
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = cifs_async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}

static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t bytes, copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			bytes = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, bytes);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
			/*
			 * If we didn't copy as much as we expected, then that
			 * may mean we trod into an unmapped area. Stop copying
			 * at that point. On the next pass through the big
			 * loop, we'll likely end up getting a zero-length
			 * write and bailing out of it.
			 */
			if (copied < bytes)
				break;
		}
		cur_len = save_len - cur_len;

		/*
		 * If we have no data to send, then that probably means that
		 * the copy above failed altogether. That's most likely because
		 * the address in the iovec was bogus. Set the rc to -EFAULT,
		 * free anything we allocated and bail out.
		 */
		if (!cur_len) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			rc = -EFAULT;
			break;
		}

		/*
		 * i + 1 now represents the number of pages we actually used in
		 * the copy phase above. Bring nr_pages down to that, and free
		 * any pages that we didn't use.
		 */
		for ( ; nr_pages > i + 1; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->marshal_iov = cifs_uncached_marshal_iov;
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

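/*
 * Note: cifs_iovec_write() is a two-phase pipeline. Phase one copies
 * user data into freshly allocated pages and issues one async write per
 * wsize-sized chunk, queueing each wdata on wdata_list. Phase two reaps
 * the completions in order of increasing offset, resending a chunk on
 * -EAGAIN, so the caller gets either the byte count written or the first
 * fatal error.
 */
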
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server
	 * exactly from pos to pos+len-1 rather than flush all affected
	 * pages, because flushing may cause an error with mandatory locks
	 * on these pages but not on the region from pos to pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}

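/*
 * Note: the strict-cache write decision therefore reduces to
 *
 *   clientCanCacheAll (exclusive oplock held) -> generic_file_aio_write()
 *   anything weaker                           -> cifs_user_writev()
 *
 * i.e. the page cache is only trusted for writes while the server has
 * granted an exclusive oplock on the file.
 */
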
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	unsigned int rsize;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, rsize);
		rc = -EAGAIN;
		read_data = NULL;

		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server all the
	 * time if we don't have a level II oplock, because the server can
	 * delay mtime changes - so we can't make a decision about
	 * invalidating the inode. Reading through the page cache can also
	 * fail if there are mandatory locks on pages affected by this
	 * read but not on the region from pos to pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}

static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);

		/* For Windows ME and 9x we do not want to request more
		   than the server negotiated, since it will refuse the
		   read otherwise */
		if ((pTcon->ses) &&
			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = current_read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &current_offset, &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};

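/*
 * Note: taking the page lock in cifs_page_mkwrite() serializes the
 * first write fault against any writeback that holds the same lock; the
 * VM then marks the page dirty, and the data reaches the server later
 * through cifs_writepage()/cifs_writepages().
 */
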
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			FreeXid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}

static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
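	/*
	 * Illustrative example: if page_list holds indexes 9,8,7,6,3 from
	 * head to tail, the first pass takes index 3 from the tail, sees 6
	 * next and stops at the discontinuity, so the first rdata covers
	 * page 3 alone; the next pass then batches 6,7,8,9 into a single
	 * read, rsize permitting.
	 */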
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}

static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

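	/*
	 * Note: zero-fill the tail of a short read below so that the whole
	 * page can be marked uptodate; data beyond EOF must read back as
	 * zeroes.
	 */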
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes
 * open for write - to avoid races with writepage extending the file.
 * In the future we could consider allowing refreshing the inode only
 * on increases in the file size, but this is tricky to do without
 * racing with writebehind page caching in the current Linux kernel
 * design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio, we can change the size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}

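/*
 * Note: the buffered-write sequence these hooks implement is roughly
 *
 *   cifs_write_begin(file, mapping, pos, len, ...)  -> locked page
 *   (the VFS copies the user data into the page)
 *   cifs_write_end(file, mapping, pos, len, copied, page, ...)
 *
 * write_begin decides whether the page must be read in or zeroed first;
 * write_end then either dirties an uptodate page for later writeback or,
 * when the page is not uptodate, pushes the copied bytes to the server
 * synchronously via cifs_write().
 */
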
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session (using a now-incorrect file handle) is not a data
	 * integrity issue, but we do not bother sending an oplock release
	 * if the session to the server is still disconnected, since the
	 * oplock has already been released by the server in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};