/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request and
		   can cause unnecessary access denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
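
/*
 * Illustrative sketch (commentary only, not driver logic): how the POSIX
 * access mode maps to the NT desired-access bits requested on the wire:
 *
 *	cifs_convert_flags(O_RDONLY) == GENERIC_READ
 *	cifs_convert_flags(O_WRONLY) == GENERIC_WRITE
 *	cifs_convert_flags(O_RDWR)   == (GENERIC_READ | GENERIC_WRITE)
 *
 * Any other O_ACCMODE value falls through to the explicit FILE_* bit set
 * returned at the bottom of the function.
 */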

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
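
/*
 * Example (for illustration only): an open(2) with O_CREAT | O_TRUNC maps
 * to FILE_OVERWRITE_IF, i.e. "create the file if it does not exist,
 * truncate it if it does", while a plain open with no create or truncate
 * flags maps to FILE_OPEN.
 */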

int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifsTconInfo *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
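
/*
 * Hedged usage sketch (illustration, not new driver code): a caller that
 * only needs the file handle and oplock can pass a NULL inode pointer:
 *
 *	__u32 oplock;
 *	__u16 netfid;
 *	rc = cifs_posix_open(full_path, NULL, inode->i_sb,
 *			     cifs_sb->mnt_file_mode, f_flags,
 *			     &oplock, &netfid, xid);
 *
 * as cifs_reopen_file() below does; passing &inode instead also refreshes
 * (or instantiates) the inode from the FILE_UNIX_BASIC_INFO reply.
 */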

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no POSIX flag combination that maps directly
 *	to the disposition FILE_SUPERSEDE (ie create whether or not the
 *	file exists); O_CREAT | O_TRUNC is similar, but truncates the
 *	existing file rather than creating a new file as FILE_SUPERSEDE
 *	does (which uses the attributes / metadata passed in on the open
 *	call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
238 
239 struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle,struct file * file,struct tcon_link * tlink,__u32 oplock)240 cifs_new_fileinfo(__u16 fileHandle, struct file *file,
241 		  struct tcon_link *tlink, __u32 oplock)
242 {
243 	struct dentry *dentry = file->f_path.dentry;
244 	struct inode *inode = dentry->d_inode;
245 	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
246 	struct cifsFileInfo *pCifsFile;
247 
248 	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
249 	if (pCifsFile == NULL)
250 		return pCifsFile;
251 
252 	pCifsFile->count = 1;
253 	pCifsFile->netfid = fileHandle;
254 	pCifsFile->pid = current->tgid;
255 	pCifsFile->uid = current_fsuid();
256 	pCifsFile->dentry = dget(dentry);
257 	pCifsFile->f_flags = file->f_flags;
258 	pCifsFile->invalidHandle = false;
259 	pCifsFile->tlink = cifs_get_tlink(tlink);
260 	mutex_init(&pCifsFile->fh_mutex);
261 	mutex_init(&pCifsFile->lock_mutex);
262 	INIT_LIST_HEAD(&pCifsFile->llist);
263 	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
264 
265 	spin_lock(&cifs_file_list_lock);
266 	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
267 	/* if readable file instance put first in list*/
268 	if (file->f_mode & FMODE_READ)
269 		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
270 	else
271 		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
272 	spin_unlock(&cifs_file_list_lock);
273 
274 	cifs_set_oplock_level(pCifsInode, oplock);
275 
276 	file->private_data = pCifsFile;
277 	return pCifsFile;
278 }
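
/*
 * Added commentary: the initial reference set up here (count == 1) belongs
 * to the open file itself and is dropped by cifsFileInfo_put() from
 * cifs_close(). Readable instances are kept at the head of the per-inode
 * list so that find_readable_file() can stop searching at the first
 * write-only entry it meets.
 */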

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need to invalidate the mapping on
		   the last close because it may cause an error when we open
		   this file again and get at least a level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifs_file->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		kfree(li);
	}
	mutex_unlock(&cifs_file->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
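
/*
 * Hedged usage sketch: every cifsFileInfo_get() (or the implicit reference
 * taken by cifs_new_fileinfo()) must be balanced by a cifsFileInfo_put(),
 * and the put must not be issued while holding cifs_file_list_lock, e.g.
 *
 *	open_file = find_writable_file(CIFS_I(inode), false);
 *	if (open_file) {
 *		... use open_file->netfid ...
 *		cifsFileInfo_put(open_file);
 *	}
 *
 * as cifs_partialpagewrite() below does.
 */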

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/* Try to reacquire byte range locks that were released when session
   to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

/* BB list all locks open on this file and relock */

	return rc;
}

static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab the rename sem here because various ops, including those
   that already have the rename sem, can end up causing writepage to get
   called and if the server was down that means we end up here, and we
   can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
				__u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}
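
/*
 * Added commentary: lock records stored here are matched and freed either
 * by an unlock in cifs_lock() that completely covers their range, or all
 * at once in cifsFileInfo_put() when the last reference to the open file
 * is dropped.
 */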

int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = false;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = true;
	/* BB add code here to normalize offset and length to
	account for negative length which we can not accept over the
	wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock,
					posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */, 0);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */,
					 0 /* numLock */, lockType,
					 0 /* wait flag */, 0);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */, 0);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */, 0);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 0, numLock, lockType,
					 wait_flag, 0);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid, li->length,
							li->offset, 1, 0,
							li->type, false, 0);
					if (stored_rc)
						rc = stored_rc;
					else {
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}

/* update the file size (if needed) after a write */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		      unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
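
/*
 * Worked example (illustration only): if server_eof is 4096 and we write
 * 512 bytes at offset 8192, end_of_write is 8704 > 4096, so server_eof
 * becomes 8704; a later 512-byte write at offset 0 ends at 512 and leaves
 * server_eof untouched.
 */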

ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name); */

	if (file->private_data == NULL)
		return -EBADF;

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	rc = generic_write_checks(file, poffset, &write_size, 0);
	if (rc)
		return rc;

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us
			   while we blocked so return what we managed to
			   write */
				return total_written;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBWrite(xid, pTcon,
				open_file->netfid,
				min_t(const int, cifs_sb->wsize,
				      write_size - total_written),
				*poffset, &bytes_written,
				NULL, write_data + total_written, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

/* Do not update local mtime - server will set its actual value on write
 *	inode->i_ctime = inode->i_mtime =
 *		current_fs_time(inode->i_sb);*/
	if (total_written > 0) {
		spin_lock(&inode->i_lock);
		if (*poffset > inode->i_size)
			i_size_write(inode, *poffset);
		spin_unlock(&inode->i_lock);
	}
	mark_inode_dirty_sync(inode);

	FreeXid(xid);
	return total_written;
}

static ssize_t cifs_write(struct cifsFileInfo *open_file,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
					   *poffset, &bytes_written, iov, 1, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
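
/*
 * Worked example (illustration only): with a wsize of 57344 bytes, a
 * cifs_write() of 100000 bytes issues one CIFSSMBWrite2 of 57344 bytes
 * followed by one of 42656 bytes, advancing *poffset and the cached
 * server_eof after each successful chunk.
 */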

struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops
	   (due to it being zero) during stress testcases so we need to check
	   for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors.  If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
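
/*
 * Added commentary: the search above runs in two passes -- first
 * restricted to handles opened by the calling process (open_file->pid ==
 * current->tgid), then, if none is usable, over every writable handle on
 * the inode (any_available == true). Invalid handles are reopened on the
 * fly, and the loop moves on rather than retrying a handle that fails to
 * reopen.
 */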

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, write_data,
					   to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	int done = 0;
	pgoff_t end;
	pgoff_t index;
	int range_whole = 0;
	struct kvec *iov;
	int len;
	int n_iov = 0;
	pgoff_t next;
	int nr_pages;
	__u64 offset = 0;
	struct cifsFileInfo *open_file;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
	struct page *page;
	struct pagevec pvec;
	int rc = 0;
	int scanned = 0;
	int xid;

	cifs_sb = CIFS_SB(mapping->host->i_sb);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
	if (iov == NULL)
		return generic_writepages(mapping, wbc);

	/*
	 * if there's no open file, then this is likely to fail too,
	 * but it'll at least handle the return. Maybe it should be
	 * a BUG() instead?
	 */
	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (!open_file) {
		kfree(iov);
		return generic_writepages(mapping, wbc);
	}

	tcon = tlink_tcon(open_file->tlink);
	cifsFileInfo_put(open_file);

	xid = GetXid();

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;
		next = 0;
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (first < 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			/*
			 * BB can we get rid of this?  pages are held by pvec
			 */
			page_cache_get(page);

			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
retry_write:
			open_file = find_writable_file(CIFS_I(mapping->host),
							false);
			if (!open_file) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
			} else {
				rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   0);
				cifsFileInfo_put(open_file);
			}

			cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);

			/*
			 * For now, treat a short write as if nothing got
			 * written. A zero length write however indicates
			 * ENOSPC or EFBIG. We have no way to know which
			 * though, so call it ENOSPC for now. EFBIG would
			 * get translated to AS_EIO anyway.
			 *
			 * FIXME: make it take into account the data that did
			 *	  get written
			 */
			if (rc == 0) {
				if (bytes_written == 0)
					rc = -ENOSPC;
				else if (bytes_written < bytes_to_write)
					rc = -EAGAIN;
			}

			/* retry on data-integrity flush */
			if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
				goto retry_write;

			/* fix the stats and EOF */
			if (bytes_written > 0) {
				cifs_stats_bytes_written(tcon, bytes_written);
				cifs_update_eof(cifsi, offset, bytes_written);
			}

			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* on retryable write error, redirty page */
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc, page);
				else if (rc != 0)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				end_page_writeback(page);
				page_cache_release(page);
			}

			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
			else
				rc = 0;

			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		} else
			/* Need to re-find the pages we skipped */
			index = pvec.pages[0]->index + 1;

		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	FreeXid(xid);
	kfree(iov);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = -EFAULT;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
	unlock_page(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(file->private_data, page_data + offset,
				copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

int cifs_strict_fsync(struct file *file, int datasync)
{
	int xid;
	int rc = 0;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead)
		cifs_invalidate_mapping(inode);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	return rc;
}

int cifs_fsync(struct file *file, int datasync)
{
	int xid;
	int rc = 0;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			goto error;
		}
	}

	return rc;

error:
	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = clen / PAGE_CACHE_SIZE;
	if (clen % PAGE_CACHE_SIZE)
		num_pages++;

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
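
/*
 * Worked example (illustration only, assuming PAGE_CACHE_SIZE == 4096):
 * with wsize == 57344 and len == 100000, clen is capped at 57344 and
 * 57344 / 4096 == 14 with no remainder, so get_numpages() returns 14
 * and sets *cur_len to 57344.
 */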

static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned int written;
	unsigned long num_pages, npages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	struct kvec *to_send;
	struct page **pages;
	struct iov_iter it;
	struct inode *inode;
	struct cifsFileInfo *open_file;
	struct cifsTconInfo *pTcon;
	struct cifs_sb_info *cifs_sb;
	int xid, rc;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);

	pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
	if (!to_send) {
		kfree(pages);
		return -ENOMEM;
	}

	rc = cifs_write_allocate_pages(pages, num_pages);
	if (rc) {
		kfree(pages);
		kfree(to_send);
		return rc;
	}

	xid = GetXid();
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);
	inode = file->f_path.dentry->d_inode;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	npages = num_pages;

	do {
		size_t save_len = cur_len;
		for (i = 0; i < npages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
			copied = iov_iter_copy_from_user(pages[i], &it, 0,
							 copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
			to_send[i+1].iov_base = kmap(pages[i]);
			to_send[i+1].iov_len = copied;
		}

		cur_len = save_len - cur_len;

		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
					   cur_len, *poffset, &written,
					   to_send, npages, 0);
		} while (rc == -EAGAIN);

		for (i = 0; i < npages; i++)
			kunmap(pages[i]);

		if (written) {
			len -= written;
			total_written += written;
			cifs_update_eof(CIFS_I(inode), *poffset, written);
			*poffset += written;
		} else if (rc < 0) {
			if (!total_written)
				total_written = rc;
			break;
		}

		/* get length and number of kvecs of the next write */
		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
	} while (len > 0);

	if (total_written > 0) {
		spin_lock(&inode->i_lock);
		if (*poffset > inode->i_size)
			i_size_write(inode, *poffset);
		spin_unlock(&inode->i_lock);
	}

	cifs_stats_bytes_written(pTcon, total_written);
	mark_inode_dirty_sync(inode);

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(to_send);
	kfree(pages);
	FreeXid(xid);
	return total_written;
}

static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize the case when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for
	 * constructing the write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server
	 * exactly from pos to pos+len-1 rather than flush all affected
	 * pages because it may cause an error with mandatory locks on
	 * these pages but not on the region from pos to pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}
1771 
1772 static ssize_t
cifs_iovec_read(struct file * file,const struct iovec * iov,unsigned long nr_segs,loff_t * poffset)1773 cifs_iovec_read(struct file *file, const struct iovec *iov,
1774 		 unsigned long nr_segs, loff_t *poffset)
1775 {
1776 	int rc;
1777 	int xid;
1778 	ssize_t total_read;
1779 	unsigned int bytes_read = 0;
1780 	size_t len, cur_len;
1781 	int iov_offset = 0;
1782 	struct cifs_sb_info *cifs_sb;
1783 	struct cifsTconInfo *pTcon;
1784 	struct cifsFileInfo *open_file;
1785 	struct smb_com_read_rsp *pSMBr;
1786 	char *read_data;
1787 
1788 	if (!nr_segs)
1789 		return 0;
1790 
1791 	len = iov_length(iov, nr_segs);
1792 	if (!len)
1793 		return 0;
1794 
1795 	xid = GetXid();
1796 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1797 
1798 	open_file = file->private_data;
1799 	pTcon = tlink_tcon(open_file->tlink);
1800 
1801 	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1802 		cFYI(1, "attempting read on write only file instance");
1803 
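	/*
	 * Issue reads of at most rsize bytes at a time, copying each
	 * response payload into the caller's iovec at the current offset.
	 */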
1804 	for (total_read = 0; total_read < len; total_read += bytes_read) {
1805 		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
1806 		rc = -EAGAIN;
1807 		read_data = NULL;
1808 
1809 		while (rc == -EAGAIN) {
1810 			int buf_type = CIFS_NO_BUFFER;
1811 			if (open_file->invalidHandle) {
1812 				rc = cifs_reopen_file(open_file, true);
1813 				if (rc != 0)
1814 					break;
1815 			}
1816 			rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
1817 					 cur_len, *poffset, &bytes_read,
1818 					 &read_data, &buf_type);
1819 			pSMBr = (struct smb_com_read_rsp *)read_data;
1820 			if (read_data) {
1821 				char *data_offset = read_data + 4 +
1822 						le16_to_cpu(pSMBr->DataOffset);
1823 				if (memcpy_toiovecend(iov, data_offset,
1824 						      iov_offset, bytes_read))
1825 					rc = -EFAULT;
1826 				if (buf_type == CIFS_SMALL_BUFFER)
1827 					cifs_small_buf_release(read_data);
1828 				else if (buf_type == CIFS_LARGE_BUFFER)
1829 					cifs_buf_release(read_data);
1830 				read_data = NULL;
1831 				iov_offset += bytes_read;
1832 			}
1833 		}
1834 
1835 		if (rc || (bytes_read == 0)) {
1836 			if (total_read) {
1837 				break;
1838 			} else {
1839 				FreeXid(xid);
1840 				return rc;
1841 			}
1842 		} else {
1843 			cifs_stats_bytes_read(pTcon, bytes_read);
1844 			*poffset += bytes_read;
1845 		}
1846 	}
1847 
1848 	FreeXid(xid);
1849 	return total_read;
1850 }
1851 
1852 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1853 		       size_t read_size, loff_t *poffset)
1854 {
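	/* wrap the user buffer in a single-segment iovec and reuse the
	   common iovec read path */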
1855 	struct iovec iov;
1856 	iov.iov_base = read_data;
1857 	iov.iov_len = read_size;
1858 
1859 	return cifs_iovec_read(file, &iov, 1, poffset);
1860 }
1861 
1862 static ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
1863 			       unsigned long nr_segs, loff_t pos)
1864 {
1865 	ssize_t read;
1866 
1867 	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
1868 	if (read > 0)
1869 		iocb->ki_pos = pos;
1870 
1871 	return read;
1872 }
1873 
1874 ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
1875 			  unsigned long nr_segs, loff_t pos)
1876 {
1877 	struct inode *inode;
1878 
1879 	inode = iocb->ki_filp->f_path.dentry->d_inode;
1880 
1881 	if (CIFS_I(inode)->clientCanCacheRead)
1882 		return generic_file_aio_read(iocb, iov, nr_segs, pos);
1883 
1884 	/*
1885 	 * In strict cache mode we need to read from the server all the time
1886 	 * if we don't have level II oplock because the server can delay mtime
1887 	 * change - so we can't make a decision about inode invalidating.
1888 	 * Reading through the page cache can also fail if there are mandatory
1889 	 * locks on pages affected by this read but not on the region from pos
1890 	 * to pos+len-1.
1891 	 */
1892 
1893 	return cifs_user_readv(iocb, iov, nr_segs, pos);
1894 }
1895 
1896 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1897 			 loff_t *poffset)
1898 {
1899 	int rc = -EACCES;
1900 	unsigned int bytes_read = 0;
1901 	unsigned int total_read;
1902 	unsigned int current_read_size;
1903 	struct cifs_sb_info *cifs_sb;
1904 	struct cifsTconInfo *pTcon;
1905 	int xid;
1906 	char *current_offset;
1907 	struct cifsFileInfo *open_file;
1908 	int buf_type = CIFS_NO_BUFFER;
1909 
1910 	xid = GetXid();
1911 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1912 
1913 	if (file->private_data == NULL) {
1914 		rc = -EBADF;
1915 		FreeXid(xid);
1916 		return rc;
1917 	}
1918 	open_file = file->private_data;
1919 	pTcon = tlink_tcon(open_file->tlink);
1920 
1921 	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1922 		cFYI(1, "attempting read on write only file instance");
1923 
1924 	for (total_read = 0, current_offset = read_data;
1925 	     read_size > total_read;
1926 	     total_read += bytes_read, current_offset += bytes_read) {
1927 		current_read_size = min_t(const int, read_size - total_read,
1928 					  cifs_sb->rsize);
1929 		/* Windows ME and 9x will refuse reads larger than the size
1930 		   they negotiated, so do not request more than that */
1931 		if ((pTcon->ses) &&
1932 			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1933 			current_read_size = min_t(const int, current_read_size,
1934 					pTcon->ses->server->maxBuf - 128);
1935 		}
1936 		rc = -EAGAIN;
1937 		while (rc == -EAGAIN) {
1938 			if (open_file->invalidHandle) {
1939 				rc = cifs_reopen_file(open_file, true);
1940 				if (rc != 0)
1941 					break;
1942 			}
1943 			rc = CIFSSMBRead(xid, pTcon,
1944 					 open_file->netfid,
1945 					 current_read_size, *poffset,
1946 					 &bytes_read, &current_offset,
1947 					 &buf_type);
1948 		}
1949 		if (rc || (bytes_read == 0)) {
1950 			if (total_read) {
1951 				break;
1952 			} else {
1953 				FreeXid(xid);
1954 				return rc;
1955 			}
1956 		} else {
1957 			cifs_stats_bytes_read(pTcon, bytes_read);
1958 			*poffset += bytes_read;
1959 		}
1960 	}
1961 	FreeXid(xid);
1962 	return total_read;
1963 }
1964 
1965 /*
1966  * If the page is mmap'ed into a process' page tables, then we need to make
1967  * sure that it doesn't change while being written back.
1968  */
1969 static int
1970 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1971 {
1972 	struct page *page = vmf->page;
1973 
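	/*
	 * Holding the page lock keeps the page stable against concurrent
	 * writeback for the duration of the fault; VM_FAULT_LOCKED tells
	 * the caller that we return with the page still locked.
	 */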
1974 	lock_page(page);
1975 	return VM_FAULT_LOCKED;
1976 }
1977 
1978 static struct vm_operations_struct cifs_file_vm_ops = {
1979 	.fault = filemap_fault,
1980 	.page_mkwrite = cifs_page_mkwrite,
1981 };
1982 
1983 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
1984 {
1985 	int rc, xid;
1986 	struct inode *inode = file->f_path.dentry->d_inode;
1987 
1988 	xid = GetXid();
1989 
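	/* without a read oplock the cached pages may be stale, so drop
	   them and let faults repopulate the mapping from the server */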
1990 	if (!CIFS_I(inode)->clientCanCacheRead)
1991 		cifs_invalidate_mapping(inode);
1992 
1993 	rc = generic_file_mmap(file, vma);
1994 	if (rc == 0)
1995 		vma->vm_ops = &cifs_file_vm_ops;
1996 	FreeXid(xid);
1997 	return rc;
1998 }
1999 
2000 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2001 {
2002 	int rc, xid;
2003 
2004 	xid = GetXid();
2005 	rc = cifs_revalidate_file(file);
2006 	if (rc) {
2007 		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
2008 		FreeXid(xid);
2009 		return rc;
2010 	}
2011 	rc = generic_file_mmap(file, vma);
2012 	if (rc == 0)
2013 		vma->vm_ops = &cifs_file_vm_ops;
2014 	FreeXid(xid);
2015 	return rc;
2016 }
2017 
2018 
2019 static void cifs_copy_cache_pages(struct address_space *mapping,
2020 	struct list_head *pages, int bytes_read, char *data)
2021 {
2022 	struct page *page;
2023 	char *target;
2024 
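	/*
	 * Consume pages from the tail of the readahead list, add each one
	 * to the page cache, and fill it from the response data; a partial
	 * final page is zero-filled beyond the bytes received.
	 */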
2025 	while (bytes_read > 0) {
2026 		if (list_empty(pages))
2027 			break;
2028 
2029 		page = list_entry(pages->prev, struct page, lru);
2030 		list_del(&page->lru);
2031 
2032 		if (add_to_page_cache_lru(page, mapping, page->index,
2033 				      GFP_KERNEL)) {
2034 			page_cache_release(page);
2035 			cFYI(1, "Add page cache failed");
2036 			data += PAGE_CACHE_SIZE;
2037 			bytes_read -= PAGE_CACHE_SIZE;
2038 			continue;
2039 		}
2040 		page_cache_release(page);
2041 
2042 		target = kmap_atomic(page, KM_USER0);
2043 
2044 		if (PAGE_CACHE_SIZE > bytes_read) {
2045 			memcpy(target, data, bytes_read);
2046 			/* zero the tail end of this partial page */
2047 			memset(target + bytes_read, 0,
2048 			       PAGE_CACHE_SIZE - bytes_read);
2049 			bytes_read = 0;
2050 		} else {
2051 			memcpy(target, data, PAGE_CACHE_SIZE);
2052 			bytes_read -= PAGE_CACHE_SIZE;
2053 		}
2054 		kunmap_atomic(target, KM_USER0);
2055 
2056 		flush_dcache_page(page);
2057 		SetPageUptodate(page);
2058 		unlock_page(page);
2059 		data += PAGE_CACHE_SIZE;
2060 
2061 		/* add page to FS-Cache */
2062 		cifs_readpage_to_fscache(mapping->host, page);
2063 	}
2064 	return;
2065 }
2066 
2067 static int cifs_readpages(struct file *file, struct address_space *mapping,
2068 	struct list_head *page_list, unsigned num_pages)
2069 {
2070 	int rc = -EACCES;
2071 	int xid;
2072 	loff_t offset;
2073 	struct page *page;
2074 	struct cifs_sb_info *cifs_sb;
2075 	struct cifsTconInfo *pTcon;
2076 	unsigned int bytes_read = 0;
2077 	unsigned int read_size, i;
2078 	char *smb_read_data = NULL;
2079 	struct smb_com_read_rsp *pSMBr;
2080 	struct cifsFileInfo *open_file;
2081 	int buf_type = CIFS_NO_BUFFER;
2082 
2083 	xid = GetXid();
2084 	if (file->private_data == NULL) {
2085 		rc = -EBADF;
2086 		FreeXid(xid);
2087 		return rc;
2088 	}
2089 	open_file = file->private_data;
2090 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2091 	pTcon = tlink_tcon(open_file->tlink);
2092 
2093 	/*
2094 	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2095 	 * immediately if the cookie is negative
2096 	 */
2097 	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2098 					 &num_pages);
2099 	if (rc == 0)
2100 		goto read_complete;
2101 
2102 	cFYI(DBG2, "rpages: num pages %d", num_pages);
2103 	for (i = 0; i < num_pages; ) {
2104 		unsigned contig_pages;
2105 		struct page *tmp_page;
2106 		unsigned long expected_index;
2107 
2108 		if (list_empty(page_list))
2109 			break;
2110 
2111 		page = list_entry(page_list->prev, struct page, lru);
2112 		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2113 
2114 		/* count adjacent pages that we will read into */
2115 		contig_pages = 0;
2116 		expected_index =
2117 			list_entry(page_list->prev, struct page, lru)->index;
2118 		list_for_each_entry_reverse(tmp_page, page_list, lru) {
2119 			if (tmp_page->index == expected_index) {
2120 				contig_pages++;
2121 				expected_index++;
2122 			} else
2123 				break;
2124 		}
2125 		if (contig_pages + i > num_pages)
2126 			contig_pages = num_pages - i;
2127 
2128 		/* for reads over a certain size we could initiate async
2129 		   read ahead */
2130 
2131 		read_size = contig_pages * PAGE_CACHE_SIZE;
2132 		/* Read size needs to be in multiples of one page */
2133 		read_size = min_t(const unsigned int, read_size,
2134 				  cifs_sb->rsize & PAGE_CACHE_MASK);
2135 		cFYI(DBG2, "rpages: read size 0x%x  contiguous pages %d",
2136 				read_size, contig_pages);
2137 		rc = -EAGAIN;
2138 		while (rc == -EAGAIN) {
2139 			if (open_file->invalidHandle) {
2140 				rc = cifs_reopen_file(open_file, true);
2141 				if (rc != 0)
2142 					break;
2143 			}
2144 
2145 			rc = CIFSSMBRead(xid, pTcon,
2146 					 open_file->netfid,
2147 					 read_size, offset,
2148 					 &bytes_read, &smb_read_data,
2149 					 &buf_type);
2150 			/* BB more RC checks ? */
2151 			if (rc == -EAGAIN) {
2152 				if (smb_read_data) {
2153 					if (buf_type == CIFS_SMALL_BUFFER)
2154 						cifs_small_buf_release(smb_read_data);
2155 					else if (buf_type == CIFS_LARGE_BUFFER)
2156 						cifs_buf_release(smb_read_data);
2157 					smb_read_data = NULL;
2158 				}
2159 			}
2160 		}
2161 		if ((rc < 0) || (smb_read_data == NULL)) {
2162 			cFYI(1, "Read error in readpages: %d", rc);
2163 			break;
2164 		} else if (bytes_read > 0) {
2165 			task_io_account_read(bytes_read);
2166 			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2167 			cifs_copy_cache_pages(mapping, page_list, bytes_read,
2168 				smb_read_data + 4 /* RFC1001 hdr */ +
2169 				le16_to_cpu(pSMBr->DataOffset));
2170 
2171 			i += bytes_read >> PAGE_CACHE_SHIFT;
2172 			cifs_stats_bytes_read(pTcon, bytes_read);
2173 			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
2174 				i++; /* account for partial page */
2175 
2176 				/* server copy of file can have smaller size
2177 				   than client */
2178 				/* BB do we need to verify this common case ?
2179 				   this case is ok - if we are at server EOF
2180 				   we will hit it on next read */
2181 
2182 				/* break; */
2183 			}
2184 		} else {
2185 			cFYI(1, "No bytes read (%d) at offset %lld. "
2186 				"Cleaning remaining pages from readahead list",
2187 				bytes_read, offset);
2188 			/* BB turn off caching and do new lookup on
2189 			   file size at server? */
2190 			break;
2191 		}
2192 		if (smb_read_data) {
2193 			if (buf_type == CIFS_SMALL_BUFFER)
2194 				cifs_small_buf_release(smb_read_data);
2195 			else if (buf_type == CIFS_LARGE_BUFFER)
2196 				cifs_buf_release(smb_read_data);
2197 			smb_read_data = NULL;
2198 		}
2199 		bytes_read = 0;
2200 	}
2201 
2202 /* need to free smb_read_data buf before exit */
2203 	if (smb_read_data) {
2204 		if (buf_type == CIFS_SMALL_BUFFER)
2205 			cifs_small_buf_release(smb_read_data);
2206 		else if (buf_type == CIFS_LARGE_BUFFER)
2207 			cifs_buf_release(smb_read_data);
2208 		smb_read_data = NULL;
2209 	}
2210 
2211 read_complete:
2212 	FreeXid(xid);
2213 	return rc;
2214 }
2215 
2216 static int cifs_readpage_worker(struct file *file, struct page *page,
2217 	loff_t *poffset)
2218 {
2219 	char *read_data;
2220 	int rc;
2221 
2222 	/* Is the page cached? */
2223 	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2224 	if (rc == 0)
2225 		goto read_complete;
2226 
2227 	page_cache_get(page);
2228 	read_data = kmap(page);
2229 	/* for reads over a certain size we could initiate async read ahead */
2230 
2231 	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
2232 
2233 	if (rc < 0)
2234 		goto io_error;
2235 	else
2236 		cFYI(1, "Bytes read %d", rc);
2237 
2238 	file->f_path.dentry->d_inode->i_atime =
2239 		current_fs_time(file->f_path.dentry->d_inode->i_sb);
2240 
2241 	if (PAGE_CACHE_SIZE > rc)
2242 		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2243 
2244 	flush_dcache_page(page);
2245 	SetPageUptodate(page);
2246 
2247 	/* send this page to the cache */
2248 	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2249 
2250 	rc = 0;
2251 
2252 io_error:
2253 	kunmap(page);
2254 	page_cache_release(page);
2255 
2256 read_complete:
2257 	return rc;
2258 }
2259 
2260 static int cifs_readpage(struct file *file, struct page *page)
2261 {
2262 	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2263 	int rc = -EACCES;
2264 	int xid;
2265 
2266 	xid = GetXid();
2267 
2268 	if (file->private_data == NULL) {
2269 		rc = -EBADF;
2270 		FreeXid(xid);
2271 		return rc;
2272 	}
2273 
2274 	cFYI(1, "readpage %p at offset %d 0x%x",
2275 		 page, (int)offset, (int)offset);
2276 
2277 	rc = cifs_readpage_worker(file, page, &offset);
2278 
2279 	unlock_page(page);
2280 
2281 	FreeXid(xid);
2282 	return rc;
2283 }
2284 
2285 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2286 {
2287 	struct cifsFileInfo *open_file;
2288 
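	/* scan the open handles on this inode for one opened for write */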
2289 	spin_lock(&cifs_file_list_lock);
2290 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2291 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2292 			spin_unlock(&cifs_file_list_lock);
2293 			return 1;
2294 		}
2295 	}
2296 	spin_unlock(&cifs_file_list_lock);
2297 	return 0;
2298 }
2299 
2300 /* We do not want to update the file size from the server for inodes
2301    open for write - to avoid races with writepage extending the file.
2302    In the future we could consider allowing refreshing of the inode
2303    only on increases in the file size, but this is tricky to do without
2304    racing with writebehind page caching in the current Linux kernel
2305    design */
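/*
 * Callers refreshing inode attributes from the server are expected to
 * check this before overwriting i_size with a server-reported size.
 */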
2306 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2307 {
2308 	if (!cifsInode)
2309 		return true;
2310 
2311 	if (is_inode_writable(cifsInode)) {
2312 		/* This inode is open for write at least once */
2313 		struct cifs_sb_info *cifs_sb;
2314 
2315 		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
2316 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2317 			/* there is no page cache to corrupt with direct
2318 			   I/O, so we can change the size safely */
2319 			return true;
2320 		}
2321 
2322 		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
2323 			return true;
2324 
2325 		return false;
2326 	} else
2327 		return true;
2328 }
2329 
2330 static int cifs_write_begin(struct file *file, struct address_space *mapping,
2331 			loff_t pos, unsigned len, unsigned flags,
2332 			struct page **pagep, void **fsdata)
2333 {
2334 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2335 	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
2336 	loff_t page_start = pos & PAGE_MASK;
2337 	loff_t i_size;
2338 	struct page *page;
2339 	int rc = 0;
2340 
2341 	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
2342 
2343 	page = grab_cache_page_write_begin(mapping, index, flags);
2344 	if (!page) {
2345 		rc = -ENOMEM;
2346 		goto out;
2347 	}
2348 
2349 	if (PageUptodate(page))
2350 		goto out;
2351 
2352 	/*
2353 	 * If we write a full page it will be up to date, no need to read from
2354 	 * the server. If the write is short, we'll end up doing a sync write
2355 	 * instead.
2356 	 */
2357 	if (len == PAGE_CACHE_SIZE)
2358 		goto out;
2359 
2360 	/*
2361 	 * optimize away the read when we have an oplock, and we're not
2362 	 * expecting to use any of the data we'd be reading in. That
2363 	 * is, when the page lies beyond the EOF, or straddles the EOF
2364 	 * and the write will cover all of the existing data.
2365 	 */
2366 	if (CIFS_I(mapping->host)->clientCanCacheRead) {
2367 		i_size = i_size_read(mapping->host);
2368 		if (page_start >= i_size ||
2369 		    (offset == 0 && (pos + len) >= i_size)) {
2370 			zero_user_segments(page, 0, offset,
2371 					   offset + len,
2372 					   PAGE_CACHE_SIZE);
2373 			/*
2374 			 * PageChecked means that the parts of the page
2375 			 * to which we're not writing are considered up
2376 			 * to date. Once the data is copied to the
2377 			 * page, it can be set uptodate.
2378 			 */
2379 			SetPageChecked(page);
2380 			goto out;
2381 		}
2382 	}
2383 
2384 	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2385 		/*
2386 		 * might as well read a page, it is fast enough. If we get
2387 		 * an error, we don't need to return it. cifs_write_end will
2388 		 * do a sync write instead since PG_uptodate isn't set.
2389 		 */
2390 		cifs_readpage_worker(file, page, &page_start);
2391 	} else {
2392 		/* we could try using another file handle if there is one -
2393 		   but how would we lock it to prevent a close of that handle
2394 		   racing with this read? In any case this page will be
2395 		   written out by write_end, so this is fine */
2396 	}
2397 out:
2398 	*pagep = page;
2399 	return rc;
2400 }
2401 
2402 static int cifs_release_page(struct page *page, gfp_t gfp)
2403 {
2404 	if (PagePrivate(page))
2405 		return 0;
2406 
2407 	return cifs_fscache_release_page(page, gfp);
2408 }
2409 
2410 static void cifs_invalidate_page(struct page *page, unsigned long offset)
2411 {
2412 	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2413 
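	/* an offset of 0 means the entire page is being invalidated */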
2414 	if (offset == 0)
2415 		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2416 }
2417 
2418 void cifs_oplock_break(struct work_struct *work)
2419 {
2420 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2421 						  oplock_break);
2422 	struct inode *inode = cfile->dentry->d_inode;
2423 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2424 	int rc = 0;
2425 
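	/*
	 * Flush dirty pages back to the server; if the read oplock is gone
	 * too, wait for the flush and drop the now-untrusted cached data.
	 */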
2426 	if (inode && S_ISREG(inode->i_mode)) {
2427 		if (cinode->clientCanCacheRead)
2428 			break_lease(inode, O_RDONLY);
2429 		else
2430 			break_lease(inode, O_WRONLY);
2431 		rc = filemap_fdatawrite(inode->i_mapping);
2432 		if (cinode->clientCanCacheRead == 0) {
2433 			rc = filemap_fdatawait(inode->i_mapping);
2434 			mapping_set_error(inode->i_mapping, rc);
2435 			invalidate_remote_inode(inode);
2436 		}
2437 		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
2438 	}
2439 
2440 	/*
2441 	 * Releasing a stale oplock after a recent reconnect of the SMB session
2442 	 * (using a now-incorrect file handle) is not a data integrity issue,
2443 	 * but do not bother sending an oplock release if the session to the
2444 	 * server is still down, since the server has already released it.
2445 	 */
2446 	if (!cfile->oplock_break_cancelled) {
2447 		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
2448 				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
2449 				 cinode->clientCanCacheRead ? 1 : 0);
2450 		cFYI(1, "Oplock release rc = %d", rc);
2451 	}
2452 
2453 	/*
2454 	 * We might have kicked in before is_valid_oplock_break()
2455 	 * finished grabbing a reference for us.  Make sure it's done by
2456 	 * waiting for cifs_file_list_lock.
2457 	 */
2458 	spin_lock(&cifs_file_list_lock);
2459 	spin_unlock(&cifs_file_list_lock);
2460 
2461 	cifs_oplock_break_put(cfile);
2462 }
2463 
2464 /* must be called while holding cifs_file_list_lock */
2465 void cifs_oplock_break_get(struct cifsFileInfo *cfile)
2466 {
2467 	cifs_sb_active(cfile->dentry->d_sb);
2468 	cifsFileInfo_get(cfile);
2469 }
2470 
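/* drops the superblock and file references taken in cifs_oplock_break_get */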
2471 void cifs_oplock_break_put(struct cifsFileInfo *cfile)
2472 {
2473 	struct super_block *sb = cfile->dentry->d_sb;
2474 
2475 	cifsFileInfo_put(cfile);
2476 	cifs_sb_deactive(sb);
2477 }
2478 
2479 const struct address_space_operations cifs_addr_ops = {
2480 	.readpage = cifs_readpage,
2481 	.readpages = cifs_readpages,
2482 	.writepage = cifs_writepage,
2483 	.writepages = cifs_writepages,
2484 	.write_begin = cifs_write_begin,
2485 	.write_end = cifs_write_end,
2486 	.set_page_dirty = __set_page_dirty_nobuffers,
2487 	.releasepage = cifs_release_page,
2488 	.invalidatepage = cifs_invalidate_page,
2489 	/* .direct_IO = */
2490 };
2491 
2492 /*
2493  * cifs_readpages requires the server to support a buffer large enough to
2494  * contain the header plus one complete page of data.  Otherwise, we need
2495  * to leave cifs_readpages out of the address space operations.
2496  */
2497 const struct address_space_operations cifs_addr_ops_smallbuf = {
2498 	.readpage = cifs_readpage,
2499 	.writepage = cifs_writepage,
2500 	.writepages = cifs_writepages,
2501 	.write_begin = cifs_write_begin,
2502 	.write_end = cifs_write_end,
2503 	.set_page_dirty = __set_page_dirty_nobuffers,
2504 	.releasepage = cifs_release_page,
2505 	.invalidatepage = cifs_invalidate_page,
2506 	/* .direct_IO = */
2507 };
2508