1 /*
2  *  fs/nfs/nfs4proc.c
3  *
4  *  Client-side procedure declarations for NFSv4.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *  Andy Adamson   <andros@umich.edu>
11  *
12  *  Redistribution and use in source and binary forms, with or without
13  *  modification, are permitted provided that the following conditions
14  *  are met:
15  *
16  *  1. Redistributions of source code must retain the above copyright
17  *     notice, this list of conditions and the following disclaimer.
18  *  2. Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *  3. Neither the name of the University nor the names of its
22  *     contributors may be used to endorse or promote products derived
23  *     from this software without specific prior written permission.
24  *
25  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/slab.h>
43 #include <linux/sunrpc/clnt.h>
44 #include <linux/sunrpc/gss_api.h>
45 #include <linux/nfs.h>
46 #include <linux/nfs4.h>
47 #include <linux/nfs_fs.h>
48 #include <linux/nfs_page.h>
49 #include <linux/nfs_mount.h>
50 #include <linux/namei.h>
51 #include <linux/mount.h>
52 #include <linux/module.h>
53 #include <linux/sunrpc/bc_xprt.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 
57 #include "nfs4_fs.h"
58 #include "delegation.h"
59 #include "internal.h"
60 #include "iostat.h"
61 #include "callback.h"
62 #include "pnfs.h"
63 
64 #define NFSDBG_FACILITY		NFSDBG_PROC
65 
66 #define NFS4_POLL_RETRY_MIN	(HZ/10)
67 #define NFS4_POLL_RETRY_MAX	(15*HZ)
68 
69 #define NFS4_MAX_LOOP_ON_RECOVER (10)
70 
71 struct nfs4_opendata;
72 static int _nfs4_proc_open(struct nfs4_opendata *data);
73 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
74 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
75 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
76 static int _nfs4_proc_lookup(struct rpc_clnt *client, struct inode *dir,
77 			     const struct qstr *name, struct nfs_fh *fhandle,
78 			     struct nfs_fattr *fattr);
79 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
80 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
81 			    struct nfs_fattr *fattr, struct iattr *sattr,
82 			    struct nfs4_state *state);
83 
84 /* Prevent leaks of NFSv4 errors into userland */
85 static int nfs4_map_errors(int err)
86 {
87 	if (err >= -1000)
88 		return err;
89 	switch (err) {
90 	case -NFS4ERR_RESOURCE:
91 		return -EREMOTEIO;
92 	case -NFS4ERR_WRONGSEC:
93 		return -EPERM;
94 	case -NFS4ERR_BADOWNER:
95 	case -NFS4ERR_BADNAME:
96 		return -EINVAL;
97 	default:
98 		dprintk("%s could not handle NFSv4 error %d\n",
99 				__func__, -err);
100 		break;
101 	}
102 	return -EIO;
103 }
104 
105 /*
106  * This is our standard bitmap for GETATTR requests.
107  */
108 const u32 nfs4_fattr_bitmap[2] = {
109 	FATTR4_WORD0_TYPE
110 	| FATTR4_WORD0_CHANGE
111 	| FATTR4_WORD0_SIZE
112 	| FATTR4_WORD0_FSID
113 	| FATTR4_WORD0_FILEID,
114 	FATTR4_WORD1_MODE
115 	| FATTR4_WORD1_NUMLINKS
116 	| FATTR4_WORD1_OWNER
117 	| FATTR4_WORD1_OWNER_GROUP
118 	| FATTR4_WORD1_RAWDEV
119 	| FATTR4_WORD1_SPACE_USED
120 	| FATTR4_WORD1_TIME_ACCESS
121 	| FATTR4_WORD1_TIME_METADATA
122 	| FATTR4_WORD1_TIME_MODIFY
123 };
124 
125 const u32 nfs4_statfs_bitmap[2] = {
126 	FATTR4_WORD0_FILES_AVAIL
127 	| FATTR4_WORD0_FILES_FREE
128 	| FATTR4_WORD0_FILES_TOTAL,
129 	FATTR4_WORD1_SPACE_AVAIL
130 	| FATTR4_WORD1_SPACE_FREE
131 	| FATTR4_WORD1_SPACE_TOTAL
132 };
133 
134 const u32 nfs4_pathconf_bitmap[2] = {
135 	FATTR4_WORD0_MAXLINK
136 	| FATTR4_WORD0_MAXNAME,
137 	0
138 };
139 
140 const u32 nfs4_fsinfo_bitmap[2] = { FATTR4_WORD0_MAXFILESIZE
141 			| FATTR4_WORD0_MAXREAD
142 			| FATTR4_WORD0_MAXWRITE
143 			| FATTR4_WORD0_LEASE_TIME,
144 			FATTR4_WORD1_TIME_DELTA
145 			| FATTR4_WORD1_FS_LAYOUT_TYPES
146 };
147 
148 const u32 nfs4_fs_locations_bitmap[2] = {
149 	FATTR4_WORD0_TYPE
150 	| FATTR4_WORD0_CHANGE
151 	| FATTR4_WORD0_SIZE
152 	| FATTR4_WORD0_FSID
153 	| FATTR4_WORD0_FILEID
154 	| FATTR4_WORD0_FS_LOCATIONS,
155 	FATTR4_WORD1_MODE
156 	| FATTR4_WORD1_NUMLINKS
157 	| FATTR4_WORD1_OWNER
158 	| FATTR4_WORD1_OWNER_GROUP
159 	| FATTR4_WORD1_RAWDEV
160 	| FATTR4_WORD1_SPACE_USED
161 	| FATTR4_WORD1_TIME_ACCESS
162 	| FATTR4_WORD1_TIME_METADATA
163 	| FATTR4_WORD1_TIME_MODIFY
164 	| FATTR4_WORD1_MOUNTED_ON_FILEID
165 };
166 
167 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
168 		struct nfs4_readdir_arg *readdir)
169 {
170 	__be32 *start, *p;
171 
172 	BUG_ON(readdir->count < 80);
173 	if (cookie > 2) {
174 		readdir->cookie = cookie;
175 		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
176 		return;
177 	}
178 
179 	readdir->cookie = 0;
180 	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
181 	if (cookie == 2)
182 		return;
183 
184 	/*
185 	 * NFSv4 servers do not return entries for '.' and '..'
186 	 * Therefore, we fake these entries here.  We let '.'
187 	 * have cookie 0 and '..' have cookie 1.  Note that
188 	 * when talking to the server, we always send cookie 0
189 	 * instead of 1 or 2.
190 	 */
191 	start = p = kmap_atomic(*readdir->pages, KM_USER0);
192 
193 	if (cookie == 0) {
194 		*p++ = xdr_one;                                  /* next */
195 		*p++ = xdr_zero;                   /* cookie, first word */
196 		*p++ = xdr_one;                   /* cookie, second word */
197 		*p++ = xdr_one;                             /* entry len */
198 		memcpy(p, ".\0\0\0", 4);                        /* entry */
199 		p++;
200 		*p++ = xdr_one;                         /* bitmap length */
201 		*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
202 		*p++ = htonl(8);              /* attribute buffer length */
203 		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
204 	}
205 
206 	*p++ = xdr_one;                                  /* next */
207 	*p++ = xdr_zero;                   /* cookie, first word */
208 	*p++ = xdr_two;                   /* cookie, second word */
209 	*p++ = xdr_two;                             /* entry len */
210 	memcpy(p, "..\0\0", 4);                         /* entry */
211 	p++;
212 	*p++ = xdr_one;                         /* bitmap length */
213 	*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
214 	*p++ = htonl(8);              /* attribute buffer length */
215 	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
216 
217 	readdir->pgbase = (char *)p - (char *)start;
218 	readdir->count -= readdir->pgbase;
219 	kunmap_atomic(start, KM_USER0);
220 }
221 
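/*
 * Wait (killably) for the state manager to finish running before
 * proceeding with an operation that depends on recovered state.
 */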
222 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
223 {
224 	int res;
225 
226 	might_sleep();
227 
228 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
229 			nfs_wait_bit_killable, TASK_KILLABLE);
230 	return res;
231 }
232 
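/*
 * Back off before retrying an operation: sleep for a killable interval
 * bounded by NFS4_POLL_RETRY_MIN/MAX, doubling *timeout for the next
 * attempt. Returns -ERESTARTSYS if a fatal signal arrived while asleep.
 */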
233 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
234 {
235 	int res = 0;
236 
237 	might_sleep();
238 
239 	if (*timeout <= 0)
240 		*timeout = NFS4_POLL_RETRY_MIN;
241 	if (*timeout > NFS4_POLL_RETRY_MAX)
242 		*timeout = NFS4_POLL_RETRY_MAX;
243 	schedule_timeout_killable(*timeout);
244 	if (fatal_signal_pending(current))
245 		res = -ERESTARTSYS;
246 	*timeout <<= 1;
247 	return res;
248 }
249 
250 /* This is the error handling routine for processes that are allowed
251  * to sleep.
252  */
253 static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
254 {
255 	struct nfs_client *clp = server->nfs_client;
256 	struct nfs4_state *state = exception->state;
257 	int ret = errorcode;
258 
259 	exception->retry = 0;
260 	switch(errorcode) {
261 		case 0:
262 			return 0;
263 		case -NFS4ERR_ADMIN_REVOKED:
264 		case -NFS4ERR_BAD_STATEID:
265 		case -NFS4ERR_OPENMODE:
266 			if (state == NULL)
267 				break;
268 			nfs4_schedule_stateid_recovery(server, state);
269 			goto wait_on_recovery;
270 		case -NFS4ERR_STALE_STATEID:
271 		case -NFS4ERR_STALE_CLIENTID:
272 		case -NFS4ERR_EXPIRED:
273 			nfs4_schedule_lease_recovery(clp);
274 			goto wait_on_recovery;
275 #if defined(CONFIG_NFS_V4_1)
276 		case -NFS4ERR_BADSESSION:
277 		case -NFS4ERR_BADSLOT:
278 		case -NFS4ERR_BAD_HIGH_SLOT:
279 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
280 		case -NFS4ERR_DEADSESSION:
281 		case -NFS4ERR_SEQ_FALSE_RETRY:
282 		case -NFS4ERR_SEQ_MISORDERED:
283 			dprintk("%s ERROR: %d Reset session\n", __func__,
284 				errorcode);
285 			nfs4_schedule_session_recovery(clp->cl_session);
286 			exception->retry = 1;
287 			break;
288 #endif /* defined(CONFIG_NFS_V4_1) */
289 		case -NFS4ERR_FILE_OPEN:
290 			if (exception->timeout > HZ) {
291 				/* We have retried a decent amount, time to
292 				 * fail
293 				 */
294 				ret = -EBUSY;
295 				break;
296 			}
297 		case -NFS4ERR_GRACE:
298 		case -NFS4ERR_DELAY:
299 		case -EKEYEXPIRED:
300 			ret = nfs4_delay(server->client, &exception->timeout);
301 			if (ret != 0)
302 				break;
303 		case -NFS4ERR_RETRY_UNCACHED_REP:
304 		case -NFS4ERR_OLD_STATEID:
305 			exception->retry = 1;
306 			break;
307 		case -NFS4ERR_BADOWNER:
308 			/* The following works around a Linux server bug! */
309 		case -NFS4ERR_BADNAME:
310 			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
311 				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
312 				exception->retry = 1;
313 				printk(KERN_WARNING "NFS: v4 server %s "
314 						"does not accept raw "
315 						"uid/gids. "
316 						"Reenabling the idmapper.\n",
317 						server->nfs_client->cl_hostname);
318 			}
319 	}
320 	/* We failed to handle the error */
321 	return nfs4_map_errors(ret);
322 wait_on_recovery:
323 	ret = nfs4_wait_clnt_recover(clp);
324 	if (ret == 0)
325 		exception->retry = 1;
326 	return ret;
327 }
328 
329 
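/*
 * Record a successful lease renewal: move cl_last_renewal forward to
 * 'timestamp' unless a later renewal has already been recorded.
 */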
330 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
331 {
332 	spin_lock(&clp->cl_lock);
333 	if (time_before(clp->cl_last_renewal,timestamp))
334 		clp->cl_last_renewal = timestamp;
335 	spin_unlock(&clp->cl_lock);
336 }
337 
338 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
339 {
340 	do_renew_lease(server->nfs_client, timestamp);
341 }
342 
343 #if defined(CONFIG_NFS_V4_1)
344 
345 /*
346  * nfs4_free_slot - free a slot and efficiently update slot table.
347  *
348  * freeing a slot is trivially done by clearing its respective bit
349  * in the bitmap.
350  * If the freed slotid equals highest_used_slotid we want to update it
351  * so that the server would be able to size down the slot table if needed,
352  * otherwise we know that the highest_used_slotid is still in use.
353  * When updating highest_used_slotid there may be "holes" in the bitmap
354  * so we need to scan down from highest_used_slotid to 0 looking for the now
355  * highest slotid in use.
356  * If none found, highest_used_slotid is set to -1.
357  *
358  * Must be called while holding tbl->slot_tbl_lock
359  */
360 static void
361 nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
362 {
363 	int free_slotid = free_slot - tbl->slots;
364 	int slotid = free_slotid;
365 
366 	BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
367 	/* clear used bit in bitmap */
368 	__clear_bit(slotid, tbl->used_slots);
369 
370 	/* update highest_used_slotid when it is freed */
371 	if (slotid == tbl->highest_used_slotid) {
372 		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
373 		if (slotid < tbl->max_slots)
374 			tbl->highest_used_slotid = slotid;
375 		else
376 			tbl->highest_used_slotid = -1;
377 	}
378 	dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
379 		free_slotid, tbl->highest_used_slotid);
380 }
381 
382 /*
383  * Signal state manager thread if session fore channel is drained
384  */
385 static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
386 {
387 	struct rpc_task *task;
388 
389 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
390 		task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
391 		if (task)
392 			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
393 		return;
394 	}
395 
396 	if (ses->fc_slot_table.highest_used_slotid != -1)
397 		return;
398 
399 	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
400 	complete(&ses->fc_slot_table.complete);
401 }
402 
403 /*
404  * Signal state manager thread if session back channel is drained
405  */
406 void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
407 {
408 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
409 	    ses->bc_slot_table.highest_used_slotid != -1)
410 		return;
411 	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
412 	complete(&ses->bc_slot_table.complete);
413 }
414 
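/*
 * Release the fore channel slot consumed by a SEQUENCE reply (if any)
 * and, when the session is draining, check whether the slot table has
 * now become empty.
 */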
415 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
416 {
417 	struct nfs4_slot_table *tbl;
418 
419 	tbl = &res->sr_session->fc_slot_table;
420 	if (!res->sr_slot) {
421 		/* nothing to free: this request never consumed a slot */
423 		dprintk("%s: No slot\n", __func__);
424 		return;
425 	}
426 
427 	spin_lock(&tbl->slot_tbl_lock);
428 	nfs4_free_slot(tbl, res->sr_slot);
429 	nfs4_check_drain_fc_complete(res->sr_session);
430 	spin_unlock(&tbl->slot_tbl_lock);
431 	res->sr_slot = NULL;
432 }
433 
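/*
 * Handle the SEQUENCE result of a completed NFSv4.1 compound: update
 * the slot's sequence number, renew the lease and check the status
 * flags on success, retry after a delay on NFS4ERR_DELAY, and free the
 * slot before returning.
 */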
434 static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
435 {
436 	unsigned long timestamp;
437 	struct nfs_client *clp;
438 
439 	/*
440 	 * sr_status remains 1 if an RPC level error occurred. The server
441 	 * may or may not have processed the sequence operation.
442 	 * Proceed as if the server received and processed the sequence
443 	 * operation.
444 	 */
445 	if (res->sr_status == 1)
446 		res->sr_status = NFS_OK;
447 
448 	/* don't increment the sequence number if the task wasn't sent */
449 	if (!RPC_WAS_SENT(task))
450 		goto out;
451 
452 	/* Check the SEQUENCE operation status */
453 	switch (res->sr_status) {
454 	case 0:
455 		/* Update the slot's sequence and clientid lease timer */
456 		++res->sr_slot->seq_nr;
457 		timestamp = res->sr_renewal_time;
458 		clp = res->sr_session->clp;
459 		do_renew_lease(clp, timestamp);
460 		/* Check sequence flags */
461 		if (res->sr_status_flags != 0)
462 			nfs4_schedule_lease_recovery(clp);
463 		break;
464 	case -NFS4ERR_DELAY:
465 		/* The server detected a resend of the RPC call and
466 		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
467 		 * of RFC5661.
468 		 */
469 		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
470 			__func__,
471 			res->sr_slot - res->sr_session->fc_slot_table.slots,
472 			res->sr_slot->seq_nr);
473 		goto out_retry;
474 	default:
475 		/* Just update the slot sequence no. */
476 		++res->sr_slot->seq_nr;
477 	}
478 out:
479 	/* The session may be reset by one of the error handlers. */
480 	dprintk("%s: status %d, freeing slot\n", __func__, res->sr_status);
481 	nfs41_sequence_free_slot(res);
482 	return 1;
483 out_retry:
484 	if (!rpc_restart_call(task))
485 		goto out;
486 	rpc_delay(task, NFS4_POLL_RETRY_MAX);
487 	return 0;
488 }
489 
490 static int nfs4_sequence_done(struct rpc_task *task,
491 			       struct nfs4_sequence_res *res)
492 {
493 	if (res->sr_session == NULL)
494 		return 1;
495 	return nfs41_sequence_done(task, res);
496 }
497 
498 /*
499  * nfs4_find_slot - efficiently look for a free slot
500  *
501  * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
502  * If found, we mark the slot as used, update the highest_used_slotid,
503  * and respectively set up the sequence operation args.
504  * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise.
505  *
506  * Note: must be called while holding the slot_tbl_lock.
507  */
508 static u8
509 nfs4_find_slot(struct nfs4_slot_table *tbl)
510 {
511 	int slotid;
512 	u8 ret_id = NFS4_MAX_SLOT_TABLE;
513 	BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE);
514 
515 	dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n",
516 		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
517 		tbl->max_slots);
518 	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
519 	if (slotid >= tbl->max_slots)
520 		goto out;
521 	__set_bit(slotid, tbl->used_slots);
522 	if (slotid > tbl->highest_used_slotid)
523 		tbl->highest_used_slotid = slotid;
524 	ret_id = slotid;
525 out:
526 	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
527 		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
528 	return ret_id;
529 }
530 
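/*
 * Allocate a fore channel slot for an NFSv4.1 request and initialise
 * the SEQUENCE arguments and results. If the session is draining, other
 * requests are already queued, or no slot is free, the task is put to
 * sleep on the slot table wait queue and -EAGAIN is returned.
 */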
531 int nfs41_setup_sequence(struct nfs4_session *session,
532 				struct nfs4_sequence_args *args,
533 				struct nfs4_sequence_res *res,
534 				int cache_reply,
535 				struct rpc_task *task)
536 {
537 	struct nfs4_slot *slot;
538 	struct nfs4_slot_table *tbl;
539 	u8 slotid;
540 
541 	dprintk("--> %s\n", __func__);
542 	/* slot already allocated? */
543 	if (res->sr_slot != NULL)
544 		return 0;
545 
546 	tbl = &session->fc_slot_table;
547 
548 	spin_lock(&tbl->slot_tbl_lock);
549 	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
550 	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
551 		/*
552 		 * The state manager will wait until the slot table is empty.
553 		 * Schedule the reset thread
554 		 */
555 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
556 		spin_unlock(&tbl->slot_tbl_lock);
557 		dprintk("%s Schedule Session Reset\n", __func__);
558 		return -EAGAIN;
559 	}
560 
561 	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
562 	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
563 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
564 		spin_unlock(&tbl->slot_tbl_lock);
565 		dprintk("%s enforce FIFO order\n", __func__);
566 		return -EAGAIN;
567 	}
568 
569 	slotid = nfs4_find_slot(tbl);
570 	if (slotid == NFS4_MAX_SLOT_TABLE) {
571 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
572 		spin_unlock(&tbl->slot_tbl_lock);
573 		dprintk("<-- %s: no free slots\n", __func__);
574 		return -EAGAIN;
575 	}
576 	spin_unlock(&tbl->slot_tbl_lock);
577 
578 	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
579 	slot = tbl->slots + slotid;
580 	args->sa_session = session;
581 	args->sa_slotid = slotid;
582 	args->sa_cache_this = cache_reply;
583 
584 	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
585 
586 	res->sr_session = session;
587 	res->sr_slot = slot;
588 	res->sr_renewal_time = jiffies;
589 	res->sr_status_flags = 0;
590 	/*
591 	 * sr_status is only set in decode_sequence, and so will remain
592 	 * set to 1 if an rpc level failure occurs.
593 	 */
594 	res->sr_status = 1;
595 	return 0;
596 }
597 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
598 
599 int nfs4_setup_sequence(const struct nfs_server *server,
600 			struct nfs4_sequence_args *args,
601 			struct nfs4_sequence_res *res,
602 			int cache_reply,
603 			struct rpc_task *task)
604 {
605 	struct nfs4_session *session = nfs4_get_session(server);
606 	int ret = 0;
607 
608 	if (session == NULL) {
609 		args->sa_session = NULL;
610 		res->sr_session = NULL;
611 		goto out;
612 	}
613 
614 	dprintk("--> %s clp %p session %p sr_slot %td\n",
615 		__func__, session->clp, session, res->sr_slot ?
616 			res->sr_slot - session->fc_slot_table.slots : -1);
617 
618 	ret = nfs41_setup_sequence(session, args, res, cache_reply,
619 				   task);
620 out:
621 	dprintk("<-- %s status=%d\n", __func__, ret);
622 	return ret;
623 }
624 
625 struct nfs41_call_sync_data {
626 	const struct nfs_server *seq_server;
627 	struct nfs4_sequence_args *seq_args;
628 	struct nfs4_sequence_res *seq_res;
629 	int cache_reply;
630 };
631 
632 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
633 {
634 	struct nfs41_call_sync_data *data = calldata;
635 
636 	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
637 
638 	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
639 				data->seq_res, data->cache_reply, task))
640 		return;
641 	rpc_call_start(task);
642 }
643 
644 static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
645 {
646 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
647 	nfs41_call_sync_prepare(task, calldata);
648 }
649 
650 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
651 {
652 	struct nfs41_call_sync_data *data = calldata;
653 
654 	nfs41_sequence_done(task, data->seq_res);
655 }
656 
657 struct rpc_call_ops nfs41_call_sync_ops = {
658 	.rpc_call_prepare = nfs41_call_sync_prepare,
659 	.rpc_call_done = nfs41_call_sync_done,
660 };
661 
662 struct rpc_call_ops nfs41_call_priv_sync_ops = {
663 	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
664 	.rpc_call_done = nfs41_call_sync_done,
665 };
666 
667 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
668 				   struct nfs_server *server,
669 				   struct rpc_message *msg,
670 				   struct nfs4_sequence_args *args,
671 				   struct nfs4_sequence_res *res,
672 				   int cache_reply,
673 				   int privileged)
674 {
675 	int ret;
676 	struct rpc_task *task;
677 	struct nfs41_call_sync_data data = {
678 		.seq_server = server,
679 		.seq_args = args,
680 		.seq_res = res,
681 		.cache_reply = cache_reply,
682 	};
683 	struct rpc_task_setup task_setup = {
684 		.rpc_client = clnt,
685 		.rpc_message = msg,
686 		.callback_ops = &nfs41_call_sync_ops,
687 		.callback_data = &data
688 	};
689 
690 	res->sr_slot = NULL;
691 	if (privileged)
692 		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
693 	task = rpc_run_task(&task_setup);
694 	if (IS_ERR(task))
695 		ret = PTR_ERR(task);
696 	else {
697 		ret = task->tk_status;
698 		rpc_put_task(task);
699 	}
700 	return ret;
701 }
702 
703 int _nfs4_call_sync_session(struct rpc_clnt *clnt,
704 			    struct nfs_server *server,
705 			    struct rpc_message *msg,
706 			    struct nfs4_sequence_args *args,
707 			    struct nfs4_sequence_res *res,
708 			    int cache_reply)
709 {
710 	return nfs4_call_sync_sequence(clnt, server, msg, args, res, cache_reply, 0);
711 }
712 
713 #else
714 static int nfs4_sequence_done(struct rpc_task *task,
715 			       struct nfs4_sequence_res *res)
716 {
717 	return 1;
718 }
719 #endif /* CONFIG_NFS_V4_1 */
720 
721 int _nfs4_call_sync(struct rpc_clnt *clnt,
722 		    struct nfs_server *server,
723 		    struct rpc_message *msg,
724 		    struct nfs4_sequence_args *args,
725 		    struct nfs4_sequence_res *res,
726 		    int cache_reply)
727 {
728 	args->sa_session = res->sr_session = NULL;
729 	return rpc_call_sync(clnt, msg, 0);
730 }
731 
732 static inline
733 int nfs4_call_sync(struct rpc_clnt *clnt,
734 		   struct nfs_server *server,
735 		   struct rpc_message *msg,
736 		   struct nfs4_sequence_args *args,
737 		   struct nfs4_sequence_res *res,
738 		   int cache_reply)
739 {
740 	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
741 						args, res, cache_reply);
742 }
743 
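/*
 * Apply the change_info4 from a directory-modifying operation:
 * invalidate the directory's cached attributes and data, and force
 * dentry revalidation if the change was not applied atomically.
 */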
744 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
745 {
746 	struct nfs_inode *nfsi = NFS_I(dir);
747 
748 	spin_lock(&dir->i_lock);
749 	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
750 	if (!cinfo->atomic || cinfo->before != nfsi->change_attr)
751 		nfs_force_lookup_revalidate(dir);
752 	nfsi->change_attr = cinfo->after;
753 	spin_unlock(&dir->i_lock);
754 }
755 
756 struct nfs4_opendata {
757 	struct kref kref;
758 	struct nfs_openargs o_arg;
759 	struct nfs_openres o_res;
760 	struct nfs_open_confirmargs c_arg;
761 	struct nfs_open_confirmres c_res;
762 	struct nfs_fattr f_attr;
763 	struct nfs_fattr dir_attr;
764 	struct path path;
765 	struct dentry *dir;
766 	struct nfs4_state_owner *owner;
767 	struct nfs4_state *state;
768 	struct iattr attrs;
769 	unsigned long timestamp;
770 	unsigned int rpc_done : 1;
771 	int rpc_status;
772 	int cancelled;
773 };
774 
775 
776 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
777 {
778 	p->o_res.f_attr = &p->f_attr;
779 	p->o_res.dir_attr = &p->dir_attr;
780 	p->o_res.seqid = p->o_arg.seqid;
781 	p->c_res.seqid = p->c_arg.seqid;
782 	p->o_res.server = p->o_arg.server;
783 	nfs_fattr_init(&p->f_attr);
784 	nfs_fattr_init(&p->dir_attr);
785 }
786 
787 static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
788 		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
789 		const struct iattr *attrs,
790 		gfp_t gfp_mask)
791 {
792 	struct dentry *parent = dget_parent(path->dentry);
793 	struct inode *dir = parent->d_inode;
794 	struct nfs_server *server = NFS_SERVER(dir);
795 	struct nfs4_opendata *p;
796 
797 	p = kzalloc(sizeof(*p), gfp_mask);
798 	if (p == NULL)
799 		goto err;
800 	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
801 	if (p->o_arg.seqid == NULL)
802 		goto err_free;
803 	path_get(path);
804 	p->path = *path;
805 	p->dir = parent;
806 	p->owner = sp;
807 	atomic_inc(&sp->so_count);
808 	p->o_arg.fh = NFS_FH(dir);
809 	p->o_arg.open_flags = flags;
810 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
811 	p->o_arg.clientid = server->nfs_client->cl_clientid;
812 	p->o_arg.id = sp->so_owner_id.id;
813 	p->o_arg.name = &p->path.dentry->d_name;
814 	p->o_arg.server = server;
815 	p->o_arg.bitmask = server->attr_bitmask;
816 	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
817 	if (flags & O_CREAT) {
818 		u32 *s;
819 
820 		p->o_arg.u.attrs = &p->attrs;
821 		memcpy(&p->attrs, attrs, sizeof(p->attrs));
822 		s = (u32 *) p->o_arg.u.verifier.data;
823 		s[0] = jiffies;
824 		s[1] = current->pid;
825 	}
826 	p->c_arg.fh = &p->o_res.fh;
827 	p->c_arg.stateid = &p->o_res.stateid;
828 	p->c_arg.seqid = p->o_arg.seqid;
829 	nfs4_init_opendata_res(p);
830 	kref_init(&p->kref);
831 	return p;
832 err_free:
833 	kfree(p);
834 err:
835 	dput(parent);
836 	return NULL;
837 }
838 
839 static void nfs4_opendata_free(struct kref *kref)
840 {
841 	struct nfs4_opendata *p = container_of(kref,
842 			struct nfs4_opendata, kref);
843 
844 	nfs_free_seqid(p->o_arg.seqid);
845 	if (p->state != NULL)
846 		nfs4_put_open_state(p->state);
847 	nfs4_put_state_owner(p->owner);
848 	dput(p->dir);
849 	path_put(&p->path);
850 	kfree(p);
851 }
852 
853 static void nfs4_opendata_put(struct nfs4_opendata *p)
854 {
855 	if (p != NULL)
856 		kref_put(&p->kref, nfs4_opendata_free);
857 }
858 
859 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
860 {
861 	int ret;
862 
863 	ret = rpc_wait_for_completion_task(task);
864 	return ret;
865 }
866 
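/*
 * Test whether an open request can be satisfied from open state we
 * already hold: the requested access mode must already be open and the
 * caller must not have asked for O_EXCL.
 */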
867 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
868 {
869 	int ret = 0;
870 
871 	if (open_mode & O_EXCL)
872 		goto out;
873 	switch (mode & (FMODE_READ|FMODE_WRITE)) {
874 		case FMODE_READ:
875 			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
876 				&& state->n_rdonly != 0;
877 			break;
878 		case FMODE_WRITE:
879 			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
880 				&& state->n_wronly != 0;
881 			break;
882 		case FMODE_READ|FMODE_WRITE:
883 			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
884 				&& state->n_rdwr != 0;
885 	}
886 out:
887 	return ret;
888 }
889 
890 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
891 {
892 	if ((delegation->type & fmode) != fmode)
893 		return 0;
894 	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
895 		return 0;
896 	nfs_mark_delegation_referenced(delegation);
897 	return 1;
898 }
899 
900 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
901 {
902 	switch (fmode) {
903 		case FMODE_WRITE:
904 			state->n_wronly++;
905 			break;
906 		case FMODE_READ:
907 			state->n_rdonly++;
908 			break;
909 		case FMODE_READ|FMODE_WRITE:
910 			state->n_rdwr++;
911 	}
912 	nfs4_state_set_mode_locked(state, state->state | fmode);
913 }
914 
915 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
916 {
917 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
918 		memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
919 	memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
920 	switch (fmode) {
921 		case FMODE_READ:
922 			set_bit(NFS_O_RDONLY_STATE, &state->flags);
923 			break;
924 		case FMODE_WRITE:
925 			set_bit(NFS_O_WRONLY_STATE, &state->flags);
926 			break;
927 		case FMODE_READ|FMODE_WRITE:
928 			set_bit(NFS_O_RDWR_STATE, &state->flags);
929 	}
930 }
931 
932 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
933 {
934 	write_seqlock(&state->seqlock);
935 	nfs_set_open_stateid_locked(state, stateid, fmode);
936 	write_sequnlock(&state->seqlock);
937 }
938 
939 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
940 {
941 	/*
942 	 * Protect the call to nfs4_state_set_mode_locked and
943 	 * serialise the stateid update
944 	 */
945 	write_seqlock(&state->seqlock);
946 	if (deleg_stateid != NULL) {
947 		memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data));
948 		set_bit(NFS_DELEGATED_STATE, &state->flags);
949 	}
950 	if (open_stateid != NULL)
951 		nfs_set_open_stateid_locked(state, open_stateid, fmode);
952 	write_sequnlock(&state->seqlock);
953 	spin_lock(&state->owner->so_lock);
954 	update_open_stateflags(state, fmode);
955 	spin_unlock(&state->owner->so_lock);
956 }
957 
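/*
 * Update the nfs4_state with the stateid returned by the server,
 * preferring the delegation stateid when a matching delegation is held.
 * Returns 1 if the state was updated, 0 otherwise.
 */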
958 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
959 {
960 	struct nfs_inode *nfsi = NFS_I(state->inode);
961 	struct nfs_delegation *deleg_cur;
962 	int ret = 0;
963 
964 	fmode &= (FMODE_READ|FMODE_WRITE);
965 
966 	rcu_read_lock();
967 	deleg_cur = rcu_dereference(nfsi->delegation);
968 	if (deleg_cur == NULL)
969 		goto no_delegation;
970 
971 	spin_lock(&deleg_cur->lock);
972 	if (nfsi->delegation != deleg_cur ||
973 	    (deleg_cur->type & fmode) != fmode)
974 		goto no_delegation_unlock;
975 
976 	if (delegation == NULL)
977 		delegation = &deleg_cur->stateid;
978 	else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0)
979 		goto no_delegation_unlock;
980 
981 	nfs_mark_delegation_referenced(deleg_cur);
982 	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
983 	ret = 1;
984 no_delegation_unlock:
985 	spin_unlock(&deleg_cur->lock);
986 no_delegation:
987 	rcu_read_unlock();
988 
989 	if (!ret && open_stateid != NULL) {
990 		__update_open_stateid(state, open_stateid, NULL, fmode);
991 		ret = 1;
992 	}
993 
994 	return ret;
995 }
996 
997 
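/*
 * Return the delegation to the server if it does not cover the access
 * mode that is about to be requested.
 */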
998 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
999 {
1000 	struct nfs_delegation *delegation;
1001 
1002 	rcu_read_lock();
1003 	delegation = rcu_dereference(NFS_I(inode)->delegation);
1004 	if (delegation == NULL || (delegation->type & fmode) == fmode) {
1005 		rcu_read_unlock();
1006 		return;
1007 	}
1008 	rcu_read_unlock();
1009 	nfs_inode_return_delegation(inode);
1010 }
1011 
1012 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1013 {
1014 	struct nfs4_state *state = opendata->state;
1015 	struct nfs_inode *nfsi = NFS_I(state->inode);
1016 	struct nfs_delegation *delegation;
1017 	int open_mode = opendata->o_arg.open_flags & O_EXCL;
1018 	fmode_t fmode = opendata->o_arg.fmode;
1019 	nfs4_stateid stateid;
1020 	int ret = -EAGAIN;
1021 
1022 	for (;;) {
1023 		if (can_open_cached(state, fmode, open_mode)) {
1024 			spin_lock(&state->owner->so_lock);
1025 			if (can_open_cached(state, fmode, open_mode)) {
1026 				update_open_stateflags(state, fmode);
1027 				spin_unlock(&state->owner->so_lock);
1028 				goto out_return_state;
1029 			}
1030 			spin_unlock(&state->owner->so_lock);
1031 		}
1032 		rcu_read_lock();
1033 		delegation = rcu_dereference(nfsi->delegation);
1034 		if (delegation == NULL ||
1035 		    !can_open_delegated(delegation, fmode)) {
1036 			rcu_read_unlock();
1037 			break;
1038 		}
1039 		/* Save the delegation */
1040 		memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data));
1041 		rcu_read_unlock();
1042 		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1043 		if (ret != 0)
1044 			goto out;
1045 		ret = -EAGAIN;
1046 
1047 		/* Try to update the stateid using the delegation */
1048 		if (update_open_stateid(state, NULL, &stateid, fmode))
1049 			goto out_return_state;
1050 	}
1051 out:
1052 	return ERR_PTR(ret);
1053 out_return_state:
1054 	atomic_inc(&state->count);
1055 	return state;
1056 }
1057 
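/*
 * Convert the outcome of an OPEN call into a referenced nfs4_state:
 * instantiate the inode, record any delegation granted by the server,
 * and update the open stateid. If the RPC never completed, fall back
 * to a cached open attempt.
 */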
1058 static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1059 {
1060 	struct inode *inode;
1061 	struct nfs4_state *state = NULL;
1062 	struct nfs_delegation *delegation;
1063 	int ret;
1064 
1065 	if (!data->rpc_done) {
1066 		state = nfs4_try_open_cached(data);
1067 		goto out;
1068 	}
1069 
1070 	ret = -EAGAIN;
1071 	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1072 		goto err;
1073 	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1074 	ret = PTR_ERR(inode);
1075 	if (IS_ERR(inode))
1076 		goto err;
1077 	ret = -ENOMEM;
1078 	state = nfs4_get_open_state(inode, data->owner);
1079 	if (state == NULL)
1080 		goto err_put_inode;
1081 	if (data->o_res.delegation_type != 0) {
1082 		int delegation_flags = 0;
1083 
1084 		rcu_read_lock();
1085 		delegation = rcu_dereference(NFS_I(inode)->delegation);
1086 		if (delegation)
1087 			delegation_flags = delegation->flags;
1088 		rcu_read_unlock();
1089 		if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1090 			nfs_inode_set_delegation(state->inode,
1091 					data->owner->so_cred,
1092 					&data->o_res);
1093 		else
1094 			nfs_inode_reclaim_delegation(state->inode,
1095 					data->owner->so_cred,
1096 					&data->o_res);
1097 	}
1098 
1099 	update_open_stateid(state, &data->o_res.stateid, NULL,
1100 			data->o_arg.fmode);
1101 	iput(inode);
1102 out:
1103 	return state;
1104 err_put_inode:
1105 	iput(inode);
1106 err:
1107 	return ERR_PTR(ret);
1108 }
1109 
1110 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1111 {
1112 	struct nfs_inode *nfsi = NFS_I(state->inode);
1113 	struct nfs_open_context *ctx;
1114 
1115 	spin_lock(&state->inode->i_lock);
1116 	list_for_each_entry(ctx, &nfsi->open_files, list) {
1117 		if (ctx->state != state)
1118 			continue;
1119 		get_nfs_open_context(ctx);
1120 		spin_unlock(&state->inode->i_lock);
1121 		return ctx;
1122 	}
1123 	spin_unlock(&state->inode->i_lock);
1124 	return ERR_PTR(-ENOENT);
1125 }
1126 
1127 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1128 {
1129 	struct nfs4_opendata *opendata;
1130 
1131 	opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL, GFP_NOFS);
1132 	if (opendata == NULL)
1133 		return ERR_PTR(-ENOMEM);
1134 	opendata->state = state;
1135 	atomic_inc(&state->count);
1136 	return opendata;
1137 }
1138 
1139 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1140 {
1141 	struct nfs4_state *newstate;
1142 	int ret;
1143 
1144 	opendata->o_arg.open_flags = 0;
1145 	opendata->o_arg.fmode = fmode;
1146 	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1147 	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1148 	nfs4_init_opendata_res(opendata);
1149 	ret = _nfs4_recover_proc_open(opendata);
1150 	if (ret != 0)
1151 		return ret;
1152 	newstate = nfs4_opendata_to_nfs4_state(opendata);
1153 	if (IS_ERR(newstate))
1154 		return PTR_ERR(newstate);
1155 	nfs4_close_state(&opendata->path, newstate, fmode);
1156 	*res = newstate;
1157 	return 0;
1158 }
1159 
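/*
 * Re-establish every open access mode currently held on this state
 * (read/write, write-only and read-only), returning -ESTALE if the
 * recovery opens resolve to a different nfs4_state.
 */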
1160 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1161 {
1162 	struct nfs4_state *newstate;
1163 	int ret;
1164 
1165 	/* memory barrier prior to reading state->n_* */
1166 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
1167 	smp_rmb();
1168 	if (state->n_rdwr != 0) {
1169 		clear_bit(NFS_O_RDWR_STATE, &state->flags);
1170 		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1171 		if (ret != 0)
1172 			return ret;
1173 		if (newstate != state)
1174 			return -ESTALE;
1175 	}
1176 	if (state->n_wronly != 0) {
1177 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1178 		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1179 		if (ret != 0)
1180 			return ret;
1181 		if (newstate != state)
1182 			return -ESTALE;
1183 	}
1184 	if (state->n_rdonly != 0) {
1185 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1186 		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1187 		if (ret != 0)
1188 			return ret;
1189 		if (newstate != state)
1190 			return -ESTALE;
1191 	}
1192 	/*
1193 	 * We may have performed cached opens for all three recoveries.
1194 	 * Check if we need to update the current stateid.
1195 	 */
1196 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1197 	    memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) {
1198 		write_seqlock(&state->seqlock);
1199 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1200 			memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data));
1201 		write_sequnlock(&state->seqlock);
1202 	}
1203 	return 0;
1204 }
1205 
1206 /*
1207  * OPEN_RECLAIM:
1208  * 	reclaim state on the server after a reboot.
1209  */
1210 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1211 {
1212 	struct nfs_delegation *delegation;
1213 	struct nfs4_opendata *opendata;
1214 	fmode_t delegation_type = 0;
1215 	int status;
1216 
1217 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1218 	if (IS_ERR(opendata))
1219 		return PTR_ERR(opendata);
1220 	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1221 	opendata->o_arg.fh = NFS_FH(state->inode);
1222 	rcu_read_lock();
1223 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1224 	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1225 		delegation_type = delegation->type;
1226 	rcu_read_unlock();
1227 	opendata->o_arg.u.delegation_type = delegation_type;
1228 	status = nfs4_open_recover(opendata, state);
1229 	nfs4_opendata_put(opendata);
1230 	return status;
1231 }
1232 
1233 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1234 {
1235 	struct nfs_server *server = NFS_SERVER(state->inode);
1236 	struct nfs4_exception exception = { };
1237 	int err;
1238 	do {
1239 		err = _nfs4_do_open_reclaim(ctx, state);
1240 		if (err != -NFS4ERR_DELAY)
1241 			break;
1242 		nfs4_handle_exception(server, err, &exception);
1243 	} while (exception.retry);
1244 	return err;
1245 }
1246 
1247 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1248 {
1249 	struct nfs_open_context *ctx;
1250 	int ret;
1251 
1252 	ctx = nfs4_state_find_open_context(state);
1253 	if (IS_ERR(ctx))
1254 		return PTR_ERR(ctx);
1255 	ret = nfs4_do_open_reclaim(ctx, state);
1256 	put_nfs_open_context(ctx);
1257 	return ret;
1258 }
1259 
1260 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1261 {
1262 	struct nfs4_opendata *opendata;
1263 	int ret;
1264 
1265 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1266 	if (IS_ERR(opendata))
1267 		return PTR_ERR(opendata);
1268 	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1269 	memcpy(opendata->o_arg.u.delegation.data, stateid->data,
1270 			sizeof(opendata->o_arg.u.delegation.data));
1271 	ret = nfs4_open_recover(opendata, state);
1272 	nfs4_opendata_put(opendata);
1273 	return ret;
1274 }
1275 
1276 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1277 {
1278 	struct nfs4_exception exception = { };
1279 	struct nfs_server *server = NFS_SERVER(state->inode);
1280 	int err;
1281 	do {
1282 		err = _nfs4_open_delegation_recall(ctx, state, stateid);
1283 		switch (err) {
1284 			case 0:
1285 			case -ENOENT:
1286 			case -ESTALE:
1287 				goto out;
1288 			case -NFS4ERR_BADSESSION:
1289 			case -NFS4ERR_BADSLOT:
1290 			case -NFS4ERR_BAD_HIGH_SLOT:
1291 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1292 			case -NFS4ERR_DEADSESSION:
1293 				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
1294 				goto out;
1295 			case -NFS4ERR_STALE_CLIENTID:
1296 			case -NFS4ERR_STALE_STATEID:
1297 			case -NFS4ERR_EXPIRED:
1298 				/* Don't recall a delegation if it was lost */
1299 				nfs4_schedule_lease_recovery(server->nfs_client);
1300 				goto out;
1301 			case -ERESTARTSYS:
1302 				/*
1303 				 * The show must go on: exit, but mark the
1304 				 * stateid as needing recovery.
1305 				 */
1306 			case -NFS4ERR_ADMIN_REVOKED:
1307 			case -NFS4ERR_BAD_STATEID:
1308 				nfs4_schedule_stateid_recovery(server, state);
1309 			case -EKEYEXPIRED:
1310 				/*
1311 				 * User RPCSEC_GSS context has expired.
1312 				 * We cannot recover this stateid now, so
1313 				 * skip it and allow recovery thread to
1314 				 * proceed.
1315 				 */
1316 			case -ENOMEM:
1317 				err = 0;
1318 				goto out;
1319 		}
1320 		err = nfs4_handle_exception(server, err, &exception);
1321 	} while (exception.retry);
1322 out:
1323 	return err;
1324 }
1325 
1326 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1327 {
1328 	struct nfs4_opendata *data = calldata;
1329 
1330 	data->rpc_status = task->tk_status;
1331 	if (data->rpc_status == 0) {
1332 		memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
1333 				sizeof(data->o_res.stateid.data));
1334 		nfs_confirm_seqid(&data->owner->so_seqid, 0);
1335 		renew_lease(data->o_res.server, data->timestamp);
1336 		data->rpc_done = 1;
1337 	}
1338 }
1339 
1340 static void nfs4_open_confirm_release(void *calldata)
1341 {
1342 	struct nfs4_opendata *data = calldata;
1343 	struct nfs4_state *state = NULL;
1344 
1345 	/* If this request hasn't been cancelled, do nothing */
1346 	if (data->cancelled == 0)
1347 		goto out_free;
1348 	/* In case of error, no cleanup! */
1349 	if (!data->rpc_done)
1350 		goto out_free;
1351 	state = nfs4_opendata_to_nfs4_state(data);
1352 	if (!IS_ERR(state))
1353 		nfs4_close_state(&data->path, state, data->o_arg.fmode);
1354 out_free:
1355 	nfs4_opendata_put(data);
1356 }
1357 
1358 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1359 	.rpc_call_done = nfs4_open_confirm_done,
1360 	.rpc_release = nfs4_open_confirm_release,
1361 };
1362 
1363 /*
1364  * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1365  */
1366 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1367 {
1368 	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1369 	struct rpc_task *task;
1370 	struct  rpc_message msg = {
1371 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1372 		.rpc_argp = &data->c_arg,
1373 		.rpc_resp = &data->c_res,
1374 		.rpc_cred = data->owner->so_cred,
1375 	};
1376 	struct rpc_task_setup task_setup_data = {
1377 		.rpc_client = server->client,
1378 		.rpc_message = &msg,
1379 		.callback_ops = &nfs4_open_confirm_ops,
1380 		.callback_data = data,
1381 		.workqueue = nfsiod_workqueue,
1382 		.flags = RPC_TASK_ASYNC,
1383 	};
1384 	int status;
1385 
1386 	kref_get(&data->kref);
1387 	data->rpc_done = 0;
1388 	data->rpc_status = 0;
1389 	data->timestamp = jiffies;
1390 	task = rpc_run_task(&task_setup_data);
1391 	if (IS_ERR(task))
1392 		return PTR_ERR(task);
1393 	status = nfs4_wait_for_completion_rpc_task(task);
1394 	if (status != 0) {
1395 		data->cancelled = 1;
1396 		smp_wmb();
1397 	} else
1398 		status = data->rpc_status;
1399 	rpc_put_task(task);
1400 	return status;
1401 }
1402 
1403 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1404 {
1405 	struct nfs4_opendata *data = calldata;
1406 	struct nfs4_state_owner *sp = data->owner;
1407 
1408 	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1409 		return;
1410 	/*
1411 	 * Check if we still need to send an OPEN call, or if we can use
1412 	 * a delegation instead.
1413 	 */
1414 	if (data->state != NULL) {
1415 		struct nfs_delegation *delegation;
1416 
1417 		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1418 			goto out_no_action;
1419 		rcu_read_lock();
1420 		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1421 		if (delegation != NULL &&
1422 		    test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
1423 			rcu_read_unlock();
1424 			goto out_no_action;
1425 		}
1426 		rcu_read_unlock();
1427 	}
1428 	/* Update sequence id. */
1429 	data->o_arg.id = sp->so_owner_id.id;
1430 	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1431 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1432 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1433 		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1434 	}
1435 	data->timestamp = jiffies;
1436 	if (nfs4_setup_sequence(data->o_arg.server,
1437 				&data->o_arg.seq_args,
1438 				&data->o_res.seq_res, 1, task))
1439 		return;
1440 	rpc_call_start(task);
1441 	return;
1442 out_no_action:
1443 	task->tk_action = NULL;
1444 
1445 }
1446 
1447 static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
1448 {
1449 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
1450 	nfs4_open_prepare(task, calldata);
1451 }
1452 
1453 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1454 {
1455 	struct nfs4_opendata *data = calldata;
1456 
1457 	data->rpc_status = task->tk_status;
1458 
1459 	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1460 		return;
1461 
1462 	if (task->tk_status == 0) {
1463 		switch (data->o_res.f_attr->mode & S_IFMT) {
1464 			case S_IFREG:
1465 				break;
1466 			case S_IFLNK:
1467 				data->rpc_status = -ELOOP;
1468 				break;
1469 			case S_IFDIR:
1470 				data->rpc_status = -EISDIR;
1471 				break;
1472 			default:
1473 				data->rpc_status = -ENOTDIR;
1474 		}
1475 		renew_lease(data->o_res.server, data->timestamp);
1476 		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1477 			nfs_confirm_seqid(&data->owner->so_seqid, 0);
1478 	}
1479 	data->rpc_done = 1;
1480 }
1481 
1482 static void nfs4_open_release(void *calldata)
1483 {
1484 	struct nfs4_opendata *data = calldata;
1485 	struct nfs4_state *state = NULL;
1486 
1487 	/* If this request hasn't been cancelled, do nothing */
1488 	if (data->cancelled == 0)
1489 		goto out_free;
1490 	/* In case of error, no cleanup! */
1491 	if (data->rpc_status != 0 || !data->rpc_done)
1492 		goto out_free;
1493 	/* In case we need an open_confirm, no cleanup! */
1494 	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1495 		goto out_free;
1496 	state = nfs4_opendata_to_nfs4_state(data);
1497 	if (!IS_ERR(state))
1498 		nfs4_close_state(&data->path, state, data->o_arg.fmode);
1499 out_free:
1500 	nfs4_opendata_put(data);
1501 }
1502 
1503 static const struct rpc_call_ops nfs4_open_ops = {
1504 	.rpc_call_prepare = nfs4_open_prepare,
1505 	.rpc_call_done = nfs4_open_done,
1506 	.rpc_release = nfs4_open_release,
1507 };
1508 
1509 static const struct rpc_call_ops nfs4_recover_open_ops = {
1510 	.rpc_call_prepare = nfs4_recover_open_prepare,
1511 	.rpc_call_done = nfs4_open_done,
1512 	.rpc_release = nfs4_open_release,
1513 };
1514 
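/*
 * Run the OPEN compound as an asynchronous RPC task and wait for it to
 * complete. If the wait is interrupted, the request is marked cancelled
 * so that the release callback cleans up any state the server returns.
 */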
1515 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1516 {
1517 	struct inode *dir = data->dir->d_inode;
1518 	struct nfs_server *server = NFS_SERVER(dir);
1519 	struct nfs_openargs *o_arg = &data->o_arg;
1520 	struct nfs_openres *o_res = &data->o_res;
1521 	struct rpc_task *task;
1522 	struct rpc_message msg = {
1523 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1524 		.rpc_argp = o_arg,
1525 		.rpc_resp = o_res,
1526 		.rpc_cred = data->owner->so_cred,
1527 	};
1528 	struct rpc_task_setup task_setup_data = {
1529 		.rpc_client = server->client,
1530 		.rpc_message = &msg,
1531 		.callback_ops = &nfs4_open_ops,
1532 		.callback_data = data,
1533 		.workqueue = nfsiod_workqueue,
1534 		.flags = RPC_TASK_ASYNC,
1535 	};
1536 	int status;
1537 
1538 	kref_get(&data->kref);
1539 	data->rpc_done = 0;
1540 	data->rpc_status = 0;
1541 	data->cancelled = 0;
1542 	if (isrecover)
1543 		task_setup_data.callback_ops = &nfs4_recover_open_ops;
1544 	task = rpc_run_task(&task_setup_data);
1545 	if (IS_ERR(task))
1546 		return PTR_ERR(task);
1547 	status = nfs4_wait_for_completion_rpc_task(task);
1548 	if (status != 0) {
1549 		data->cancelled = 1;
1550 		smp_wmb();
1551 	} else
1552 		status = data->rpc_status;
1553 	rpc_put_task(task);
1554 
1555 	return status;
1556 }
1557 
1558 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1559 {
1560 	struct inode *dir = data->dir->d_inode;
1561 	struct nfs_openres *o_res = &data->o_res;
1562 	int status;
1563 
1564 	status = nfs4_run_open_task(data, 1);
1565 	if (status != 0 || !data->rpc_done)
1566 		return status;
1567 
1568 	nfs_refresh_inode(dir, o_res->dir_attr);
1569 
1570 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1571 		status = _nfs4_proc_open_confirm(data);
1572 		if (status != 0)
1573 			return status;
1574 	}
1575 
1576 	return status;
1577 }
1578 
1579 /*
1580  * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1581  */
1582 static int _nfs4_proc_open(struct nfs4_opendata *data)
1583 {
1584 	struct inode *dir = data->dir->d_inode;
1585 	struct nfs_server *server = NFS_SERVER(dir);
1586 	struct nfs_openargs *o_arg = &data->o_arg;
1587 	struct nfs_openres *o_res = &data->o_res;
1588 	int status;
1589 
1590 	status = nfs4_run_open_task(data, 0);
1591 	if (status != 0 || !data->rpc_done)
1592 		return status;
1593 
1594 	if (o_arg->open_flags & O_CREAT) {
1595 		update_changeattr(dir, &o_res->cinfo);
1596 		nfs_post_op_update_inode(dir, o_res->dir_attr);
1597 	} else
1598 		nfs_refresh_inode(dir, o_res->dir_attr);
1599 	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1600 		server->caps &= ~NFS_CAP_POSIX_LOCK;
1601 	if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1602 		status = _nfs4_proc_open_confirm(data);
1603 		if (status != 0)
1604 			return status;
1605 	}
1606 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1607 		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1608 	return 0;
1609 }
1610 
1611 static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1612 {
1613 	unsigned int loop;
1614 	int ret;
1615 
1616 	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1617 		ret = nfs4_wait_clnt_recover(clp);
1618 		if (ret != 0)
1619 			break;
1620 		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1621 		    !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1622 			break;
1623 		nfs4_schedule_state_manager(clp);
1624 		ret = -EIO;
1625 	}
1626 	return ret;
1627 }
1628 
1629 static int nfs4_recover_expired_lease(struct nfs_server *server)
1630 {
1631 	return nfs4_client_recover_expired_lease(server->nfs_client);
1632 }
1633 
1634 /*
1635  * OPEN_EXPIRED:
1636  * 	reclaim state on the server after a network partition.
1637  * 	Assumes caller holds the appropriate lock
1638  */
1639 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1640 {
1641 	struct nfs4_opendata *opendata;
1642 	int ret;
1643 
1644 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1645 	if (IS_ERR(opendata))
1646 		return PTR_ERR(opendata);
1647 	ret = nfs4_open_recover(opendata, state);
1648 	if (ret == -ESTALE)
1649 		d_drop(ctx->path.dentry);
1650 	nfs4_opendata_put(opendata);
1651 	return ret;
1652 }
1653 
1654 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1655 {
1656 	struct nfs_server *server = NFS_SERVER(state->inode);
1657 	struct nfs4_exception exception = { };
1658 	int err;
1659 
1660 	do {
1661 		err = _nfs4_open_expired(ctx, state);
1662 		switch (err) {
1663 		default:
1664 			goto out;
1665 		case -NFS4ERR_GRACE:
1666 		case -NFS4ERR_DELAY:
1667 			nfs4_handle_exception(server, err, &exception);
1668 			err = 0;
1669 		}
1670 	} while (exception.retry);
1671 out:
1672 	return err;
1673 }
1674 
1675 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1676 {
1677 	struct nfs_open_context *ctx;
1678 	int ret;
1679 
1680 	ctx = nfs4_state_find_open_context(state);
1681 	if (IS_ERR(ctx))
1682 		return PTR_ERR(ctx);
1683 	ret = nfs4_do_open_expired(ctx, state);
1684 	put_nfs_open_context(ctx);
1685 	return ret;
1686 }
1687 
1688 /*
1689  * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
1690  * fields corresponding to attributes that were used to store the verifier.
1691  * Make sure we clobber those fields in the later setattr call
1692  */
1693 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1694 {
1695 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1696 	    !(sattr->ia_valid & ATTR_ATIME_SET))
1697 		sattr->ia_valid |= ATTR_ATIME;
1698 
1699 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1700 	    !(sattr->ia_valid & ATTR_MTIME_SET))
1701 		sattr->ia_valid |= ATTR_MTIME;
1702 }
1703 
1704 /*
1705  * Returns a referenced nfs4_state
1706  */
1707 static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
1708 {
1709 	struct nfs4_state_owner  *sp;
1710 	struct nfs4_state     *state = NULL;
1711 	struct nfs_server       *server = NFS_SERVER(dir);
1712 	struct nfs4_opendata *opendata;
1713 	int status;
1714 
1715 	/* Protect against reboot recovery conflicts */
1716 	status = -ENOMEM;
1717 	if (!(sp = nfs4_get_state_owner(server, cred))) {
1718 		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1719 		goto out_err;
1720 	}
1721 	status = nfs4_recover_expired_lease(server);
1722 	if (status != 0)
1723 		goto err_put_state_owner;
1724 	if (path->dentry->d_inode != NULL)
1725 		nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode);
1726 	status = -ENOMEM;
1727 	opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr, GFP_KERNEL);
1728 	if (opendata == NULL)
1729 		goto err_put_state_owner;
1730 
1731 	if (path->dentry->d_inode != NULL)
1732 		opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp);
1733 
1734 	status = _nfs4_proc_open(opendata);
1735 	if (status != 0)
1736 		goto err_opendata_put;
1737 
1738 	state = nfs4_opendata_to_nfs4_state(opendata);
1739 	status = PTR_ERR(state);
1740 	if (IS_ERR(state))
1741 		goto err_opendata_put;
1742 	if (server->caps & NFS_CAP_POSIX_LOCK)
1743 		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1744 
1745 	if (opendata->o_arg.open_flags & O_EXCL) {
1746 		nfs4_exclusive_attrset(opendata, sattr);
1747 
1748 		nfs_fattr_init(opendata->o_res.f_attr);
1749 		status = nfs4_do_setattr(state->inode, cred,
1750 				opendata->o_res.f_attr, sattr,
1751 				state);
1752 		if (status == 0)
1753 			nfs_setattr_update_inode(state->inode, sattr);
1754 		nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1755 	}
1756 	nfs4_opendata_put(opendata);
1757 	nfs4_put_state_owner(sp);
1758 	*res = state;
1759 	return 0;
1760 err_opendata_put:
1761 	nfs4_opendata_put(opendata);
1762 err_put_state_owner:
1763 	nfs4_put_state_owner(sp);
1764 out_err:
1765 	*res = NULL;
1766 	return status;
1767 }
1768 
1769 
1770 static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
1771 {
1772 	struct nfs4_exception exception = { };
1773 	struct nfs4_state *res;
1774 	int status;
1775 
1776 	do {
1777 		status = _nfs4_do_open(dir, path, fmode, flags, sattr, cred, &res);
1778 		if (status == 0)
1779 			break;
1780 		/* NOTE: BAD_SEQID means the server and client disagree about the
1781 		 * book-keeping w.r.t. state-changing operations
1782 		 * (OPEN/CLOSE/LOCK/LOCKU...)
1783 		 * It is actually a sign of a bug on the client or on the server.
1784 		 *
1785 		 * If we receive a BAD_SEQID error in the particular case of
1786 		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1787 		 * have unhashed the old state_owner for us, and that we can
1788 		 * therefore safely retry using a new one. We should still warn
1789 		 * the user though...
1790 		 */
1791 		if (status == -NFS4ERR_BAD_SEQID) {
1792 			printk(KERN_WARNING "NFS: v4 server %s "
1793 					"returned a bad sequence-id error!\n",
1794 					NFS_SERVER(dir)->nfs_client->cl_hostname);
1795 			exception.retry = 1;
1796 			continue;
1797 		}
1798 		/*
1799 		 * BAD_STATEID on OPEN means that the server cancelled our
1800 		 * state before it received the OPEN_CONFIRM.
1801 		 * Recover by retrying the request as per the discussion
1802 		 * on Page 181 of RFC3530.
1803 		 */
1804 		if (status == -NFS4ERR_BAD_STATEID) {
1805 			exception.retry = 1;
1806 			continue;
1807 		}
1808 		if (status == -EAGAIN) {
1809 			/* We must have found a delegation */
1810 			exception.retry = 1;
1811 			continue;
1812 		}
1813 		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
1814 					status, &exception));
1815 	} while (exception.retry);
1816 	return res;
1817 }
1818 
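/*
 * Issue the SETATTR using the most specific stateid available: a
 * delegation stateid if we hold one, otherwise the stateid from @state,
 * otherwise the all-zeroes stateid.  A successful reply renews the lease
 * when open state was supplied.
 */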
1819 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1820 			    struct nfs_fattr *fattr, struct iattr *sattr,
1821 			    struct nfs4_state *state)
1822 {
1823 	struct nfs_server *server = NFS_SERVER(inode);
1824 	struct nfs_setattrargs arg = {
1825 		.fh		= NFS_FH(inode),
1826 		.iap		= sattr,
1827 		.server		= server,
1828 		.bitmask	= server->attr_bitmask,
1829 	};
1830 	struct nfs_setattrres res = {
1831 		.fattr		= fattr,
1832 		.server		= server,
1833 	};
1834 	struct rpc_message msg = {
1835 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
1836 		.rpc_argp	= &arg,
1837 		.rpc_resp	= &res,
1838 		.rpc_cred	= cred,
1839 	};
1840 	unsigned long timestamp = jiffies;
1841 	int status;
1842 
1843 	nfs_fattr_init(fattr);
1844 
1845 	if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
1846 		/* Use that stateid */
1847 	} else if (state != NULL) {
1848 		nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
1849 	} else
1850 		memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
1851 
1852 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
1853 	if (status == 0 && state != NULL)
1854 		renew_lease(server, timestamp);
1855 	return status;
1856 }
1857 
1858 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1859 			   struct nfs_fattr *fattr, struct iattr *sattr,
1860 			   struct nfs4_state *state)
1861 {
1862 	struct nfs_server *server = NFS_SERVER(inode);
1863 	struct nfs4_exception exception = { };
1864 	int err;
1865 	do {
1866 		err = nfs4_handle_exception(server,
1867 				_nfs4_do_setattr(inode, cred, fattr, sattr, state),
1868 				&exception);
1869 	} while (exception.retry);
1870 	return err;
1871 }
1872 
1873 struct nfs4_closedata {
1874 	struct path path;
1875 	struct inode *inode;
1876 	struct nfs4_state *state;
1877 	struct nfs_closeargs arg;
1878 	struct nfs_closeres res;
1879 	struct nfs_fattr fattr;
1880 	unsigned long timestamp;
1881 	bool roc;
1882 	u32 roc_barrier;
1883 };
1884 
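/*
 * rpc_release callback for CLOSE/OPEN_DOWNGRADE: drop the pNFS
 * return-on-close reference (if one was taken), the open state, the
 * seqid, the state owner and the path reference.
 */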
1885 static void nfs4_free_closedata(void *data)
1886 {
1887 	struct nfs4_closedata *calldata = data;
1888 	struct nfs4_state_owner *sp = calldata->state->owner;
1889 
1890 	if (calldata->roc)
1891 		pnfs_roc_release(calldata->state->inode);
1892 	nfs4_put_open_state(calldata->state);
1893 	nfs_free_seqid(calldata->arg.seqid);
1894 	nfs4_put_state_owner(sp);
1895 	path_put(&calldata->path);
1896 	kfree(calldata);
1897 }
1898 
1899 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
1900 		fmode_t fmode)
1901 {
1902 	spin_lock(&state->owner->so_lock);
1903 	if (!(fmode & FMODE_READ))
1904 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1905 	if (!(fmode & FMODE_WRITE))
1906 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1907 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
1908 	spin_unlock(&state->owner->so_lock);
1909 }
1910 
1911 static void nfs4_close_done(struct rpc_task *task, void *data)
1912 {
1913 	struct nfs4_closedata *calldata = data;
1914 	struct nfs4_state *state = calldata->state;
1915 	struct nfs_server *server = NFS_SERVER(calldata->inode);
1916 
1917 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
1918 		return;
1919 	/* hmm. we are done with the inode, and in the process of freeing
1920 	 * the state_owner. we keep this around to process errors
1921 	 */
1922 	switch (task->tk_status) {
1923 		case 0:
1924 			if (calldata->roc)
1925 				pnfs_roc_set_barrier(state->inode,
1926 						     calldata->roc_barrier);
1927 			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
1928 			renew_lease(server, calldata->timestamp);
1929 			nfs4_close_clear_stateid_flags(state,
1930 					calldata->arg.fmode);
1931 			break;
1932 		case -NFS4ERR_STALE_STATEID:
1933 		case -NFS4ERR_OLD_STATEID:
1934 		case -NFS4ERR_BAD_STATEID:
1935 		case -NFS4ERR_EXPIRED:
1936 			if (calldata->arg.fmode == 0)
1937 				break;
1938 		default:
1939 			if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
1940 				rpc_restart_call_prepare(task);
1941 	}
1942 	nfs_release_seqid(calldata->arg.seqid);
1943 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
1944 }
1945 
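/*
 * Decide, under the state owner's so_lock, whether this request should go
 * out as a full CLOSE, be downgraded to an OPEN_DOWNGRADE, or be skipped
 * entirely because no open share mode actually needs to be released.
 */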
1946 static void nfs4_close_prepare(struct rpc_task *task, void *data)
1947 {
1948 	struct nfs4_closedata *calldata = data;
1949 	struct nfs4_state *state = calldata->state;
1950 	int call_close = 0;
1951 
1952 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
1953 		return;
1954 
1955 	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
1956 	calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
1957 	spin_lock(&state->owner->so_lock);
1958 	/* Calculate the change in open mode */
1959 	if (state->n_rdwr == 0) {
1960 		if (state->n_rdonly == 0) {
1961 			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
1962 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
1963 			calldata->arg.fmode &= ~FMODE_READ;
1964 		}
1965 		if (state->n_wronly == 0) {
1966 			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
1967 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
1968 			calldata->arg.fmode &= ~FMODE_WRITE;
1969 		}
1970 	}
1971 	spin_unlock(&state->owner->so_lock);
1972 
1973 	if (!call_close) {
1974 		/* Note: exit _without_ calling nfs4_close_done */
1975 		task->tk_action = NULL;
1976 		return;
1977 	}
1978 
1979 	if (calldata->arg.fmode == 0) {
1980 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
1981 		if (calldata->roc &&
1982 		    pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
1983 			rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
1984 				     task, NULL);
1985 			return;
1986 		}
1987 	}
1988 
1989 	nfs_fattr_init(calldata->res.fattr);
1990 	calldata->timestamp = jiffies;
1991 	if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
1992 				&calldata->arg.seq_args, &calldata->res.seq_res,
1993 				1, task))
1994 		return;
1995 	rpc_call_start(task);
1996 }
1997 
1998 static const struct rpc_call_ops nfs4_close_ops = {
1999 	.rpc_call_prepare = nfs4_close_prepare,
2000 	.rpc_call_done = nfs4_close_done,
2001 	.rpc_release = nfs4_free_closedata,
2002 };
2003 
2004 /*
2005  * It is possible for data to be read/written from a mem-mapped file
2006  * after the sys_close call (which hits the vfs layer as a flush).
2007  * This means that we can't safely call nfsv4 close on a file until
2008  * the inode is cleared. This in turn means that we are not good
2009  * NFSv4 citizens - we do not indicate to the server to update the file's
2010  * share state even when we are done with one of the three share
2011  * stateids in the inode.
2012  *
2013  * NOTE: Caller must be holding the sp->so_owner semaphore!
2014  */
2015 int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
2016 {
2017 	struct nfs_server *server = NFS_SERVER(state->inode);
2018 	struct nfs4_closedata *calldata;
2019 	struct nfs4_state_owner *sp = state->owner;
2020 	struct rpc_task *task;
2021 	struct rpc_message msg = {
2022 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2023 		.rpc_cred = state->owner->so_cred,
2024 	};
2025 	struct rpc_task_setup task_setup_data = {
2026 		.rpc_client = server->client,
2027 		.rpc_message = &msg,
2028 		.callback_ops = &nfs4_close_ops,
2029 		.workqueue = nfsiod_workqueue,
2030 		.flags = RPC_TASK_ASYNC,
2031 	};
2032 	int status = -ENOMEM;
2033 
2034 	calldata = kzalloc(sizeof(*calldata), gfp_mask);
2035 	if (calldata == NULL)
2036 		goto out;
2037 	calldata->inode = state->inode;
2038 	calldata->state = state;
2039 	calldata->arg.fh = NFS_FH(state->inode);
2040 	calldata->arg.stateid = &state->open_stateid;
2041 	/* Serialization for the sequence id */
2042 	calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2043 	if (calldata->arg.seqid == NULL)
2044 		goto out_free_calldata;
2045 	calldata->arg.fmode = 0;
2046 	calldata->arg.bitmask = server->cache_consistency_bitmask;
2047 	calldata->res.fattr = &calldata->fattr;
2048 	calldata->res.seqid = calldata->arg.seqid;
2049 	calldata->res.server = server;
2050 	calldata->roc = roc;
2051 	path_get(path);
2052 	calldata->path = *path;
2053 
2054 	msg.rpc_argp = &calldata->arg;
2055 	msg.rpc_resp = &calldata->res;
2056 	task_setup_data.callback_data = calldata;
2057 	task = rpc_run_task(&task_setup_data);
2058 	if (IS_ERR(task))
2059 		return PTR_ERR(task);
2060 	status = 0;
2061 	if (wait)
2062 		status = rpc_wait_for_completion_task(task);
2063 	rpc_put_task(task);
2064 	return status;
2065 out_free_calldata:
2066 	kfree(calldata);
2067 out:
2068 	if (roc)
2069 		pnfs_roc_release(state->inode);
2070 	nfs4_put_open_state(state);
2071 	nfs4_put_state_owner(sp);
2072 	return status;
2073 }
2074 
2075 static struct inode *
2076 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2077 {
2078 	struct nfs4_state *state;
2079 
2080 	/* Protect against concurrent sillydeletes */
2081 	state = nfs4_do_open(dir, &ctx->path, ctx->mode, open_flags, attr, ctx->cred);
2082 	if (IS_ERR(state))
2083 		return ERR_CAST(state);
2084 	ctx->state = state;
2085 	return igrab(state->inode);
2086 }
2087 
2088 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2089 {
2090 	if (ctx->state == NULL)
2091 		return;
2092 	if (is_sync)
2093 		nfs4_close_sync(&ctx->path, ctx->state, ctx->mode);
2094 	else
2095 		nfs4_close_state(&ctx->path, ctx->state, ctx->mode);
2096 }
2097 
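/*
 * Probe the server's supported attributes and translate the returned
 * bitmask into NFS_CAP_* flags.  The result also seeds the
 * cache-consistency bitmask used when requesting post-operation
 * attributes.
 */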
2098 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2099 {
2100 	struct nfs4_server_caps_arg args = {
2101 		.fhandle = fhandle,
2102 	};
2103 	struct nfs4_server_caps_res res = {};
2104 	struct rpc_message msg = {
2105 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2106 		.rpc_argp = &args,
2107 		.rpc_resp = &res,
2108 	};
2109 	int status;
2110 
2111 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2112 	if (status == 0) {
2113 		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2114 		server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2115 				NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2116 				NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2117 				NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2118 				NFS_CAP_CTIME|NFS_CAP_MTIME);
2119 		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2120 			server->caps |= NFS_CAP_ACLS;
2121 		if (res.has_links != 0)
2122 			server->caps |= NFS_CAP_HARDLINKS;
2123 		if (res.has_symlinks != 0)
2124 			server->caps |= NFS_CAP_SYMLINKS;
2125 		if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2126 			server->caps |= NFS_CAP_FILEID;
2127 		if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2128 			server->caps |= NFS_CAP_MODE;
2129 		if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2130 			server->caps |= NFS_CAP_NLINK;
2131 		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2132 			server->caps |= NFS_CAP_OWNER;
2133 		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2134 			server->caps |= NFS_CAP_OWNER_GROUP;
2135 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2136 			server->caps |= NFS_CAP_ATIME;
2137 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2138 			server->caps |= NFS_CAP_CTIME;
2139 		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2140 			server->caps |= NFS_CAP_MTIME;
2141 
2142 		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2143 		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2144 		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2145 		server->acl_bitmask = res.acl_bitmask;
2146 	}
2147 
2148 	return status;
2149 }
2150 
2151 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2152 {
2153 	struct nfs4_exception exception = { };
2154 	int err;
2155 	do {
2156 		err = nfs4_handle_exception(server,
2157 				_nfs4_server_capabilities(server, fhandle),
2158 				&exception);
2159 	} while (exception.retry);
2160 	return err;
2161 }
2162 
2163 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2164 		struct nfs_fsinfo *info)
2165 {
2166 	struct nfs4_lookup_root_arg args = {
2167 		.bitmask = nfs4_fattr_bitmap,
2168 	};
2169 	struct nfs4_lookup_res res = {
2170 		.server = server,
2171 		.fattr = info->fattr,
2172 		.fh = fhandle,
2173 	};
2174 	struct rpc_message msg = {
2175 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2176 		.rpc_argp = &args,
2177 		.rpc_resp = &res,
2178 	};
2179 
2180 	nfs_fattr_init(info->fattr);
2181 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2182 }
2183 
2184 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2185 		struct nfs_fsinfo *info)
2186 {
2187 	struct nfs4_exception exception = { };
2188 	int err;
2189 	do {
2190 		err = _nfs4_lookup_root(server, fhandle, info);
2191 		switch (err) {
2192 		case 0:
2193 		case -NFS4ERR_WRONGSEC:
2194 			break;
2195 		default:
2196 			err = nfs4_handle_exception(server, err, &exception);
2197 		}
2198 	} while (exception.retry);
2199 	return err;
2200 }
2201 
2202 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2203 				struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2204 {
2205 	struct rpc_auth *auth;
2206 	int ret;
2207 
2208 	auth = rpcauth_create(flavor, server->client);
2209 	if (!auth) {
2210 		ret = -EIO;
2211 		goto out;
2212 	}
2213 	ret = nfs4_lookup_root(server, fhandle, info);
2214 out:
2215 	return ret;
2216 }
2217 
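/*
 * No security flavour was specified at mount time, so try each GSS
 * pseudoflavor the kernel knows about, followed by AUTH_NULL, until the
 * root lookup stops failing with NFS4ERR_WRONGSEC or EACCES.
 */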
2218 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2219 			      struct nfs_fsinfo *info)
2220 {
2221 	int i, len, status = 0;
2222 	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2223 
2224 	len = gss_mech_list_pseudoflavors(&flav_array[0]);
2225 	flav_array[len] = RPC_AUTH_NULL;
2226 	len += 1;
2227 
2228 	for (i = 0; i < len; i++) {
2229 		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2230 		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2231 			continue;
2232 		break;
2233 	}
2234 	/*
2235 	 * -EACCES could mean that the user doesn't have correct permissions
2236 	 * to access the mount.  It could also mean that we tried to mount
2237 	 * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
2238 	 * existing mount programs don't handle -EACCES very well so it should
2239 	 * be mapped to -EPERM instead.
2240 	 */
2241 	if (status == -EACCES)
2242 		status = -EPERM;
2243 	return status;
2244 }
2245 
2246 /*
2247  * get the file handle for the "/" directory on the server
2248  */
2249 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2250 			      struct nfs_fsinfo *info)
2251 {
2252 	int status = nfs4_lookup_root(server, fhandle, info);
2253 	if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2254 		/*
2255 		 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2256 		 * by nfs4_map_errors() as this function exits.
2257 		 */
2258 		status = nfs4_find_root_sec(server, fhandle, info);
2259 	if (status == 0)
2260 		status = nfs4_server_capabilities(server, fhandle);
2261 	if (status == 0)
2262 		status = nfs4_do_fsinfo(server, fhandle, info);
2263 	return nfs4_map_errors(status);
2264 }
2265 
2266 /*
2267  * Get locations and (maybe) other attributes of a referral.
2268  * Note that we'll actually follow the referral later when
2269  * we detect fsid mismatch in inode revalidation
2270  */
2271 static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle)
2272 {
2273 	int status = -ENOMEM;
2274 	struct page *page = NULL;
2275 	struct nfs4_fs_locations *locations = NULL;
2276 
2277 	page = alloc_page(GFP_KERNEL);
2278 	if (page == NULL)
2279 		goto out;
2280 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2281 	if (locations == NULL)
2282 		goto out;
2283 
2284 	status = nfs4_proc_fs_locations(dir, name, locations, page);
2285 	if (status != 0)
2286 		goto out;
2287 	/* Make sure server returned a different fsid for the referral */
2288 	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2289 		dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name);
2290 		status = -EIO;
2291 		goto out;
2292 	}
2293 
2294 	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2295 	fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL;
2296 	if (!fattr->mode)
2297 		fattr->mode = S_IFDIR;
2298 	memset(fhandle, 0, sizeof(struct nfs_fh));
2299 out:
2300 	if (page)
2301 		__free_page(page);
2302 	kfree(locations);
2303 	return status;
2304 }
2305 
2306 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2307 {
2308 	struct nfs4_getattr_arg args = {
2309 		.fh = fhandle,
2310 		.bitmask = server->attr_bitmask,
2311 	};
2312 	struct nfs4_getattr_res res = {
2313 		.fattr = fattr,
2314 		.server = server,
2315 	};
2316 	struct rpc_message msg = {
2317 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2318 		.rpc_argp = &args,
2319 		.rpc_resp = &res,
2320 	};
2321 
2322 	nfs_fattr_init(fattr);
2323 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2324 }
2325 
2326 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2327 {
2328 	struct nfs4_exception exception = { };
2329 	int err;
2330 	do {
2331 		err = nfs4_handle_exception(server,
2332 				_nfs4_proc_getattr(server, fhandle, fattr),
2333 				&exception);
2334 	} while (exception.retry);
2335 	return err;
2336 }
2337 
2338 /*
2339  * The file is not closed if it is opened due to a request to change
2340  * the size of the file. The open call will not be needed once the
2341  * VFS layer lookup-intents are implemented.
2342  *
2343  * Close is called when the inode is destroyed.
2344  * If we haven't opened the file for O_WRONLY, we
2345  * need to in the size_change case to obtain a stateid.
2346  *
2347  * Got race?
2348  * Because OPEN is always done by name in nfsv4, it is
2349  * possible that we opened a different file by the same
2350  * name.  We can recognize this race condition, but we
2351  * can't do anything about it besides returning an error.
2352  *
2353  * This will be fixed with VFS changes (lookup-intent).
2354  */
2355 static int
2356 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2357 		  struct iattr *sattr)
2358 {
2359 	struct inode *inode = dentry->d_inode;
2360 	struct rpc_cred *cred = NULL;
2361 	struct nfs4_state *state = NULL;
2362 	int status;
2363 
2364 	nfs_fattr_init(fattr);
2365 
2366 	/* Search for an existing open(O_WRITE) file */
2367 	if (sattr->ia_valid & ATTR_FILE) {
2368 		struct nfs_open_context *ctx;
2369 
2370 		ctx = nfs_file_open_context(sattr->ia_file);
2371 		if (ctx) {
2372 			cred = ctx->cred;
2373 			state = ctx->state;
2374 		}
2375 	}
2376 
2377 	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2378 	if (status == 0)
2379 		nfs_setattr_update_inode(inode, sattr);
2380 	return status;
2381 }
2382 
2383 static int _nfs4_proc_lookupfh(struct rpc_clnt *clnt, struct nfs_server *server,
2384 		const struct nfs_fh *dirfh, const struct qstr *name,
2385 		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2386 {
2387 	int		       status;
2388 	struct nfs4_lookup_arg args = {
2389 		.bitmask = server->attr_bitmask,
2390 		.dir_fh = dirfh,
2391 		.name = name,
2392 	};
2393 	struct nfs4_lookup_res res = {
2394 		.server = server,
2395 		.fattr = fattr,
2396 		.fh = fhandle,
2397 	};
2398 	struct rpc_message msg = {
2399 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2400 		.rpc_argp = &args,
2401 		.rpc_resp = &res,
2402 	};
2403 
2404 	nfs_fattr_init(fattr);
2405 
2406 	dprintk("NFS call  lookupfh %s\n", name->name);
2407 	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2408 	dprintk("NFS reply lookupfh: %d\n", status);
2409 	return status;
2410 }
2411 
2412 static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
2413 			      struct qstr *name, struct nfs_fh *fhandle,
2414 			      struct nfs_fattr *fattr)
2415 {
2416 	struct nfs4_exception exception = { };
2417 	int err;
2418 	do {
2419 		err = _nfs4_proc_lookupfh(server->client, server, dirfh, name, fhandle, fattr);
2420 		/* FIXME: !!!! */
2421 		if (err == -NFS4ERR_MOVED) {
2422 			err = -EREMOTE;
2423 			break;
2424 		}
2425 		err = nfs4_handle_exception(server, err, &exception);
2426 	} while (exception.retry);
2427 	return err;
2428 }
2429 
2430 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2431 		const struct qstr *name, struct nfs_fh *fhandle,
2432 		struct nfs_fattr *fattr)
2433 {
2434 	int status;
2435 
2436 	dprintk("NFS call  lookup %s\n", name->name);
2437 	status = _nfs4_proc_lookupfh(clnt, NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
2438 	if (status == -NFS4ERR_MOVED)
2439 		status = nfs4_get_referral(dir, name, fattr, fhandle);
2440 	dprintk("NFS reply lookup: %d\n", status);
2441 	return status;
2442 }
2443 
2444 void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh)
2445 {
2446 	memset(fh, 0, sizeof(struct nfs_fh));
2447 	fattr->fsid.major = 1;
2448 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2449 		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT;
2450 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2451 	fattr->nlink = 2;
2452 }
2453 
2454 static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
2455 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2456 {
2457 	struct nfs4_exception exception = { };
2458 	int err;
2459 	do {
2460 		err = nfs4_handle_exception(NFS_SERVER(dir),
2461 				_nfs4_proc_lookup(clnt, dir, name, fhandle, fattr),
2462 				&exception);
2463 		if (err == -EPERM)
2464 			nfs_fixup_secinfo_attributes(fattr, fhandle);
2465 	} while (exception.retry);
2466 	return err;
2467 }
2468 
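/*
 * ACCESS: map the generic MAY_* permission bits onto NFS4_ACCESS_* bits
 * (directories use LOOKUP/DELETE rather than EXECUTE), then translate the
 * server's answer back into the access cache entry.
 */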
2469 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2470 {
2471 	struct nfs_server *server = NFS_SERVER(inode);
2472 	struct nfs4_accessargs args = {
2473 		.fh = NFS_FH(inode),
2474 		.bitmask = server->attr_bitmask,
2475 	};
2476 	struct nfs4_accessres res = {
2477 		.server = server,
2478 	};
2479 	struct rpc_message msg = {
2480 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2481 		.rpc_argp = &args,
2482 		.rpc_resp = &res,
2483 		.rpc_cred = entry->cred,
2484 	};
2485 	int mode = entry->mask;
2486 	int status;
2487 
2488 	/*
2489 	 * Determine which access bits we want to ask for...
2490 	 */
2491 	if (mode & MAY_READ)
2492 		args.access |= NFS4_ACCESS_READ;
2493 	if (S_ISDIR(inode->i_mode)) {
2494 		if (mode & MAY_WRITE)
2495 			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2496 		if (mode & MAY_EXEC)
2497 			args.access |= NFS4_ACCESS_LOOKUP;
2498 	} else {
2499 		if (mode & MAY_WRITE)
2500 			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2501 		if (mode & MAY_EXEC)
2502 			args.access |= NFS4_ACCESS_EXECUTE;
2503 	}
2504 
2505 	res.fattr = nfs_alloc_fattr();
2506 	if (res.fattr == NULL)
2507 		return -ENOMEM;
2508 
2509 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2510 	if (!status) {
2511 		entry->mask = 0;
2512 		if (res.access & NFS4_ACCESS_READ)
2513 			entry->mask |= MAY_READ;
2514 		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
2515 			entry->mask |= MAY_WRITE;
2516 		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
2517 			entry->mask |= MAY_EXEC;
2518 		nfs_refresh_inode(inode, res.fattr);
2519 	}
2520 	nfs_free_fattr(res.fattr);
2521 	return status;
2522 }
2523 
2524 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2525 {
2526 	struct nfs4_exception exception = { };
2527 	int err;
2528 	do {
2529 		err = nfs4_handle_exception(NFS_SERVER(inode),
2530 				_nfs4_proc_access(inode, entry),
2531 				&exception);
2532 	} while (exception.retry);
2533 	return err;
2534 }
2535 
2536 /*
2537  * TODO: For the time being, we don't try to get any attributes
2538  * along with any of the zero-copy operations READ, READDIR,
2539  * READLINK, WRITE.
2540  *
2541  * In the case of the first three, we want to put the GETATTR
2542  * after the read-type operation -- this is because it is hard
2543  * to predict the length of a GETATTR response in v4, and thus
2544  * align the READ data correctly.  This means that the GETATTR
2545  * may end up partially falling into the page cache, and we should
2546  * shift it into the 'tail' of the xdr_buf before processing.
2547  * To do this efficiently, we need to know the total length
2548  * of data received, which doesn't seem to be available outside
2549  * of the RPC layer.
2550  *
2551  * In the case of WRITE, we also want to put the GETATTR after
2552  * the operation -- in this case because we want to make sure
2553  * we get the post-operation mtime and size.  This means that
2554  * we can't use xdr_encode_pages() as written: we need a variant
2555  * of it which would leave room in the 'tail' iovec.
2556  *
2557  * Both of these changes to the XDR layer would in fact be quite
2558  * minor, but I decided to leave them for a subsequent patch.
2559  */
2560 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2561 		unsigned int pgbase, unsigned int pglen)
2562 {
2563 	struct nfs4_readlink args = {
2564 		.fh       = NFS_FH(inode),
2565 		.pgbase	  = pgbase,
2566 		.pglen    = pglen,
2567 		.pages    = &page,
2568 	};
2569 	struct nfs4_readlink_res res;
2570 	struct rpc_message msg = {
2571 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2572 		.rpc_argp = &args,
2573 		.rpc_resp = &res,
2574 	};
2575 
2576 	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
2577 }
2578 
2579 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2580 		unsigned int pgbase, unsigned int pglen)
2581 {
2582 	struct nfs4_exception exception = { };
2583 	int err;
2584 	do {
2585 		err = nfs4_handle_exception(NFS_SERVER(inode),
2586 				_nfs4_proc_readlink(inode, page, pgbase, pglen),
2587 				&exception);
2588 	} while (exception.retry);
2589 	return err;
2590 }
2591 
2592 /*
2593  * Got race?
2594  * We will need to arrange for the VFS layer to provide an atomic open.
2595  * Until then, this create/open method is prone to inefficiency and race
2596  * conditions due to the lookup, create, and open VFS calls from sys_open()
2597  * placed on the wire.
2598  *
2599  * Given the above sorry state of affairs, I'm simply sending an OPEN.
2600  * The file will be opened again in the subsequent VFS open call
2601  * (nfs4_proc_file_open).
2602  *
2603  * The open for read will just hang around to be used by any process that
2604  * opens the file O_RDONLY. This will all be resolved with the VFS changes.
2605  */
2606 
2607 static int
2608 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2609                  int flags, struct nfs_open_context *ctx)
2610 {
2611 	struct path my_path = {
2612 		.dentry = dentry,
2613 	};
2614 	struct path *path = &my_path;
2615 	struct nfs4_state *state;
2616 	struct rpc_cred *cred = NULL;
2617 	fmode_t fmode = 0;
2618 	int status = 0;
2619 
2620 	if (ctx != NULL) {
2621 		cred = ctx->cred;
2622 		path = &ctx->path;
2623 		fmode = ctx->mode;
2624 	}
2625 	sattr->ia_mode &= ~current_umask();
2626 	state = nfs4_do_open(dir, path, fmode, flags, sattr, cred);
2627 	d_drop(dentry);
2628 	if (IS_ERR(state)) {
2629 		status = PTR_ERR(state);
2630 		goto out;
2631 	}
2632 	d_add(dentry, igrab(state->inode));
2633 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2634 	if (ctx != NULL)
2635 		ctx->state = state;
2636 	else
2637 		nfs4_close_sync(path, state, fmode);
2638 out:
2639 	return status;
2640 }
2641 
2642 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2643 {
2644 	struct nfs_server *server = NFS_SERVER(dir);
2645 	struct nfs_removeargs args = {
2646 		.fh = NFS_FH(dir),
2647 		.name.len = name->len,
2648 		.name.name = name->name,
2649 		.bitmask = server->attr_bitmask,
2650 	};
2651 	struct nfs_removeres res = {
2652 		.server = server,
2653 	};
2654 	struct rpc_message msg = {
2655 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2656 		.rpc_argp = &args,
2657 		.rpc_resp = &res,
2658 	};
2659 	int status = -ENOMEM;
2660 
2661 	res.dir_attr = nfs_alloc_fattr();
2662 	if (res.dir_attr == NULL)
2663 		goto out;
2664 
2665 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
2666 	if (status == 0) {
2667 		update_changeattr(dir, &res.cinfo);
2668 		nfs_post_op_update_inode(dir, res.dir_attr);
2669 	}
2670 	nfs_free_fattr(res.dir_attr);
2671 out:
2672 	return status;
2673 }
2674 
2675 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2676 {
2677 	struct nfs4_exception exception = { };
2678 	int err;
2679 	do {
2680 		err = nfs4_handle_exception(NFS_SERVER(dir),
2681 				_nfs4_proc_remove(dir, name),
2682 				&exception);
2683 	} while (exception.retry);
2684 	return err;
2685 }
2686 
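/*
 * Set-up and completion callbacks used by the asynchronous (sillydelete)
 * unlink path; the REMOVE reply updates the directory's change attribute
 * and cached attributes just as the synchronous case does.
 */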
2687 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2688 {
2689 	struct nfs_server *server = NFS_SERVER(dir);
2690 	struct nfs_removeargs *args = msg->rpc_argp;
2691 	struct nfs_removeres *res = msg->rpc_resp;
2692 
2693 	args->bitmask = server->cache_consistency_bitmask;
2694 	res->server = server;
2695 	res->seq_res.sr_slot = NULL;
2696 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2697 }
2698 
2699 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2700 {
2701 	struct nfs_removeres *res = task->tk_msg.rpc_resp;
2702 
2703 	if (!nfs4_sequence_done(task, &res->seq_res))
2704 		return 0;
2705 	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2706 		return 0;
2707 	update_changeattr(dir, &res->cinfo);
2708 	nfs_post_op_update_inode(dir, res->dir_attr);
2709 	return 1;
2710 }
2711 
2712 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2713 {
2714 	struct nfs_server *server = NFS_SERVER(dir);
2715 	struct nfs_renameargs *arg = msg->rpc_argp;
2716 	struct nfs_renameres *res = msg->rpc_resp;
2717 
2718 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2719 	arg->bitmask = server->attr_bitmask;
2720 	res->server = server;
2721 }
2722 
2723 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
2724 				 struct inode *new_dir)
2725 {
2726 	struct nfs_renameres *res = task->tk_msg.rpc_resp;
2727 
2728 	if (!nfs4_sequence_done(task, &res->seq_res))
2729 		return 0;
2730 	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2731 		return 0;
2732 
2733 	update_changeattr(old_dir, &res->old_cinfo);
2734 	nfs_post_op_update_inode(old_dir, res->old_fattr);
2735 	update_changeattr(new_dir, &res->new_cinfo);
2736 	nfs_post_op_update_inode(new_dir, res->new_fattr);
2737 	return 1;
2738 }
2739 
2740 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2741 		struct inode *new_dir, struct qstr *new_name)
2742 {
2743 	struct nfs_server *server = NFS_SERVER(old_dir);
2744 	struct nfs_renameargs arg = {
2745 		.old_dir = NFS_FH(old_dir),
2746 		.new_dir = NFS_FH(new_dir),
2747 		.old_name = old_name,
2748 		.new_name = new_name,
2749 		.bitmask = server->attr_bitmask,
2750 	};
2751 	struct nfs_renameres res = {
2752 		.server = server,
2753 	};
2754 	struct rpc_message msg = {
2755 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
2756 		.rpc_argp = &arg,
2757 		.rpc_resp = &res,
2758 	};
2759 	int status = -ENOMEM;
2760 
2761 	res.old_fattr = nfs_alloc_fattr();
2762 	res.new_fattr = nfs_alloc_fattr();
2763 	if (res.old_fattr == NULL || res.new_fattr == NULL)
2764 		goto out;
2765 
2766 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2767 	if (!status) {
2768 		update_changeattr(old_dir, &res.old_cinfo);
2769 		nfs_post_op_update_inode(old_dir, res.old_fattr);
2770 		update_changeattr(new_dir, &res.new_cinfo);
2771 		nfs_post_op_update_inode(new_dir, res.new_fattr);
2772 	}
2773 out:
2774 	nfs_free_fattr(res.new_fattr);
2775 	nfs_free_fattr(res.old_fattr);
2776 	return status;
2777 }
2778 
2779 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2780 		struct inode *new_dir, struct qstr *new_name)
2781 {
2782 	struct nfs4_exception exception = { };
2783 	int err;
2784 	do {
2785 		err = nfs4_handle_exception(NFS_SERVER(old_dir),
2786 				_nfs4_proc_rename(old_dir, old_name,
2787 					new_dir, new_name),
2788 				&exception);
2789 	} while (exception.retry);
2790 	return err;
2791 }
2792 
2793 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2794 {
2795 	struct nfs_server *server = NFS_SERVER(inode);
2796 	struct nfs4_link_arg arg = {
2797 		.fh     = NFS_FH(inode),
2798 		.dir_fh = NFS_FH(dir),
2799 		.name   = name,
2800 		.bitmask = server->attr_bitmask,
2801 	};
2802 	struct nfs4_link_res res = {
2803 		.server = server,
2804 	};
2805 	struct rpc_message msg = {
2806 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
2807 		.rpc_argp = &arg,
2808 		.rpc_resp = &res,
2809 	};
2810 	int status = -ENOMEM;
2811 
2812 	res.fattr = nfs_alloc_fattr();
2813 	res.dir_attr = nfs_alloc_fattr();
2814 	if (res.fattr == NULL || res.dir_attr == NULL)
2815 		goto out;
2816 
2817 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2818 	if (!status) {
2819 		update_changeattr(dir, &res.cinfo);
2820 		nfs_post_op_update_inode(dir, res.dir_attr);
2821 		nfs_post_op_update_inode(inode, res.fattr);
2822 	}
2823 out:
2824 	nfs_free_fattr(res.dir_attr);
2825 	nfs_free_fattr(res.fattr);
2826 	return status;
2827 }
2828 
2829 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2830 {
2831 	struct nfs4_exception exception = { };
2832 	int err;
2833 	do {
2834 		err = nfs4_handle_exception(NFS_SERVER(inode),
2835 				_nfs4_proc_link(inode, dir, name),
2836 				&exception);
2837 	} while (exception.retry);
2838 	return err;
2839 }
2840 
2841 struct nfs4_createdata {
2842 	struct rpc_message msg;
2843 	struct nfs4_create_arg arg;
2844 	struct nfs4_create_res res;
2845 	struct nfs_fh fh;
2846 	struct nfs_fattr fattr;
2847 	struct nfs_fattr dir_fattr;
2848 };
2849 
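/*
 * Allocate and pre-fill the argument/result container shared by the
 * CREATE-based operations (symlink, mkdir, mknod); callers only need to
 * fill in the type-specific parts before calling nfs4_do_create().
 */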
2850 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
2851 		struct qstr *name, struct iattr *sattr, u32 ftype)
2852 {
2853 	struct nfs4_createdata *data;
2854 
2855 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2856 	if (data != NULL) {
2857 		struct nfs_server *server = NFS_SERVER(dir);
2858 
2859 		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
2860 		data->msg.rpc_argp = &data->arg;
2861 		data->msg.rpc_resp = &data->res;
2862 		data->arg.dir_fh = NFS_FH(dir);
2863 		data->arg.server = server;
2864 		data->arg.name = name;
2865 		data->arg.attrs = sattr;
2866 		data->arg.ftype = ftype;
2867 		data->arg.bitmask = server->attr_bitmask;
2868 		data->res.server = server;
2869 		data->res.fh = &data->fh;
2870 		data->res.fattr = &data->fattr;
2871 		data->res.dir_fattr = &data->dir_fattr;
2872 		nfs_fattr_init(data->res.fattr);
2873 		nfs_fattr_init(data->res.dir_fattr);
2874 	}
2875 	return data;
2876 }
2877 
2878 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
2879 {
2880 	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
2881 				    &data->arg.seq_args, &data->res.seq_res, 1);
2882 	if (status == 0) {
2883 		update_changeattr(dir, &data->res.dir_cinfo);
2884 		nfs_post_op_update_inode(dir, data->res.dir_fattr);
2885 		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
2886 	}
2887 	return status;
2888 }
2889 
2890 static void nfs4_free_createdata(struct nfs4_createdata *data)
2891 {
2892 	kfree(data);
2893 }
2894 
2895 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
2896 		struct page *page, unsigned int len, struct iattr *sattr)
2897 {
2898 	struct nfs4_createdata *data;
2899 	int status = -ENAMETOOLONG;
2900 
2901 	if (len > NFS4_MAXPATHLEN)
2902 		goto out;
2903 
2904 	status = -ENOMEM;
2905 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
2906 	if (data == NULL)
2907 		goto out;
2908 
2909 	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
2910 	data->arg.u.symlink.pages = &page;
2911 	data->arg.u.symlink.len = len;
2912 
2913 	status = nfs4_do_create(dir, dentry, data);
2914 
2915 	nfs4_free_createdata(data);
2916 out:
2917 	return status;
2918 }
2919 
2920 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
2921 		struct page *page, unsigned int len, struct iattr *sattr)
2922 {
2923 	struct nfs4_exception exception = { };
2924 	int err;
2925 	do {
2926 		err = nfs4_handle_exception(NFS_SERVER(dir),
2927 				_nfs4_proc_symlink(dir, dentry, page,
2928 							len, sattr),
2929 				&exception);
2930 	} while (exception.retry);
2931 	return err;
2932 }
2933 
2934 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
2935 		struct iattr *sattr)
2936 {
2937 	struct nfs4_createdata *data;
2938 	int status = -ENOMEM;
2939 
2940 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
2941 	if (data == NULL)
2942 		goto out;
2943 
2944 	status = nfs4_do_create(dir, dentry, data);
2945 
2946 	nfs4_free_createdata(data);
2947 out:
2948 	return status;
2949 }
2950 
2951 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
2952 		struct iattr *sattr)
2953 {
2954 	struct nfs4_exception exception = { };
2955 	int err;
2956 
2957 	sattr->ia_mode &= ~current_umask();
2958 	do {
2959 		err = nfs4_handle_exception(NFS_SERVER(dir),
2960 				_nfs4_proc_mkdir(dir, dentry, sattr),
2961 				&exception);
2962 	} while (exception.retry);
2963 	return err;
2964 }
2965 
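/*
 * READDIR: the cookie verifier is cached in the directory inode, and a
 * successful reply both updates it and returns the length of the reply
 * data adjusted by the page base.
 */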
2966 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
2967 		u64 cookie, struct page **pages, unsigned int count, int plus)
2968 {
2969 	struct inode		*dir = dentry->d_inode;
2970 	struct nfs4_readdir_arg args = {
2971 		.fh = NFS_FH(dir),
2972 		.pages = pages,
2973 		.pgbase = 0,
2974 		.count = count,
2975 		.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
2976 		.plus = plus,
2977 	};
2978 	struct nfs4_readdir_res res;
2979 	struct rpc_message msg = {
2980 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
2981 		.rpc_argp = &args,
2982 		.rpc_resp = &res,
2983 		.rpc_cred = cred,
2984 	};
2985 	int			status;
2986 
2987 	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
2988 			dentry->d_parent->d_name.name,
2989 			dentry->d_name.name,
2990 			(unsigned long long)cookie);
2991 	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
2992 	res.pgbase = args.pgbase;
2993 	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
2994 	if (status >= 0) {
2995 		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
2996 		status += args.pgbase;
2997 	}
2998 
2999 	nfs_invalidate_atime(dir);
3000 
3001 	dprintk("%s: returns %d\n", __func__, status);
3002 	return status;
3003 }
3004 
3005 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3006 		u64 cookie, struct page **pages, unsigned int count, int plus)
3007 {
3008 	struct nfs4_exception exception = { };
3009 	int err;
3010 	do {
3011 		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3012 				_nfs4_proc_readdir(dentry, cred, cookie,
3013 					pages, count, plus),
3014 				&exception);
3015 	} while (exception.retry);
3016 	return err;
3017 }
3018 
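/*
 * MKNOD is implemented as a CREATE with the ftype derived from the mode:
 * NF4SOCK by default, NF4FIFO for FIFOs, and NF4BLK/NF4CHR (carrying the
 * major/minor numbers in specdata) for device nodes.
 */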
3019 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3020 		struct iattr *sattr, dev_t rdev)
3021 {
3022 	struct nfs4_createdata *data;
3023 	int mode = sattr->ia_mode;
3024 	int status = -ENOMEM;
3025 
3026 	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
3027 	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
3028 
3029 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3030 	if (data == NULL)
3031 		goto out;
3032 
3033 	if (S_ISFIFO(mode))
3034 		data->arg.ftype = NF4FIFO;
3035 	else if (S_ISBLK(mode)) {
3036 		data->arg.ftype = NF4BLK;
3037 		data->arg.u.device.specdata1 = MAJOR(rdev);
3038 		data->arg.u.device.specdata2 = MINOR(rdev);
3039 	}
3040 	else if (S_ISCHR(mode)) {
3041 		data->arg.ftype = NF4CHR;
3042 		data->arg.u.device.specdata1 = MAJOR(rdev);
3043 		data->arg.u.device.specdata2 = MINOR(rdev);
3044 	}
3045 
3046 	status = nfs4_do_create(dir, dentry, data);
3047 
3048 	nfs4_free_createdata(data);
3049 out:
3050 	return status;
3051 }
3052 
3053 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3054 		struct iattr *sattr, dev_t rdev)
3055 {
3056 	struct nfs4_exception exception = { };
3057 	int err;
3058 
3059 	sattr->ia_mode &= ~current_umask();
3060 	do {
3061 		err = nfs4_handle_exception(NFS_SERVER(dir),
3062 				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
3063 				&exception);
3064 	} while (exception.retry);
3065 	return err;
3066 }
3067 
3068 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3069 		 struct nfs_fsstat *fsstat)
3070 {
3071 	struct nfs4_statfs_arg args = {
3072 		.fh = fhandle,
3073 		.bitmask = server->attr_bitmask,
3074 	};
3075 	struct nfs4_statfs_res res = {
3076 		.fsstat = fsstat,
3077 	};
3078 	struct rpc_message msg = {
3079 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3080 		.rpc_argp = &args,
3081 		.rpc_resp = &res,
3082 	};
3083 
3084 	nfs_fattr_init(fsstat->fattr);
3085 	return  nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3086 }
3087 
3088 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3089 {
3090 	struct nfs4_exception exception = { };
3091 	int err;
3092 	do {
3093 		err = nfs4_handle_exception(server,
3094 				_nfs4_proc_statfs(server, fhandle, fsstat),
3095 				&exception);
3096 	} while (exception.retry);
3097 	return err;
3098 }
3099 
3100 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3101 		struct nfs_fsinfo *fsinfo)
3102 {
3103 	struct nfs4_fsinfo_arg args = {
3104 		.fh = fhandle,
3105 		.bitmask = server->attr_bitmask,
3106 	};
3107 	struct nfs4_fsinfo_res res = {
3108 		.fsinfo = fsinfo,
3109 	};
3110 	struct rpc_message msg = {
3111 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3112 		.rpc_argp = &args,
3113 		.rpc_resp = &res,
3114 	};
3115 
3116 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3117 }
3118 
3119 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3120 {
3121 	struct nfs4_exception exception = { };
3122 	int err;
3123 
3124 	do {
3125 		err = nfs4_handle_exception(server,
3126 				_nfs4_do_fsinfo(server, fhandle, fsinfo),
3127 				&exception);
3128 	} while (exception.retry);
3129 	return err;
3130 }
3131 
3132 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3133 {
3134 	nfs_fattr_init(fsinfo->fattr);
3135 	return nfs4_do_fsinfo(server, fhandle, fsinfo);
3136 }
3137 
3138 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3139 		struct nfs_pathconf *pathconf)
3140 {
3141 	struct nfs4_pathconf_arg args = {
3142 		.fh = fhandle,
3143 		.bitmask = server->attr_bitmask,
3144 	};
3145 	struct nfs4_pathconf_res res = {
3146 		.pathconf = pathconf,
3147 	};
3148 	struct rpc_message msg = {
3149 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3150 		.rpc_argp = &args,
3151 		.rpc_resp = &res,
3152 	};
3153 
3154 	/* None of the pathconf attributes are mandatory to implement */
3155 	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3156 		memset(pathconf, 0, sizeof(*pathconf));
3157 		return 0;
3158 	}
3159 
3160 	nfs_fattr_init(pathconf->fattr);
3161 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3162 }
3163 
3164 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3165 		struct nfs_pathconf *pathconf)
3166 {
3167 	struct nfs4_exception exception = { };
3168 	int err;
3169 
3170 	do {
3171 		err = nfs4_handle_exception(server,
3172 				_nfs4_proc_pathconf(server, fhandle, pathconf),
3173 				&exception);
3174 	} while (exception.retry);
3175 	return err;
3176 }
3177 
3178 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3179 {
3180 	struct nfs_server *server = NFS_SERVER(data->inode);
3181 
3182 	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3183 		nfs_restart_rpc(task, server->nfs_client);
3184 		return -EAGAIN;
3185 	}
3186 
3187 	nfs_invalidate_atime(data->inode);
3188 	if (task->tk_status > 0)
3189 		renew_lease(server, data->timestamp);
3190 	return 0;
3191 }
3192 
3193 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3194 {
3195 
3196 	dprintk("--> %s\n", __func__);
3197 
3198 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3199 		return -EAGAIN;
3200 
3201 	return data->read_done_cb(task, data);
3202 }
3203 
3204 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3205 {
3206 	data->timestamp   = jiffies;
3207 	data->read_done_cb = nfs4_read_done_cb;
3208 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3209 }
3210 
3211 /* Reset the nfs_read_data to send the read to the MDS. */
3212 void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
3213 {
3214 	dprintk("%s Reset task for i/o through\n", __func__);
3215 	put_lseg(data->lseg);
3216 	data->lseg = NULL;
3217 	/* offsets will differ in the dense stripe case */
3218 	data->args.offset = data->mds_offset;
3219 	data->ds_clp = NULL;
3220 	data->args.fh     = NFS_FH(data->inode);
3221 	data->read_done_cb = nfs4_read_done_cb;
3222 	task->tk_ops = data->mds_ops;
3223 	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3224 }
3225 EXPORT_SYMBOL_GPL(nfs4_reset_read);
3226 
3227 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3228 {
3229 	struct inode *inode = data->inode;
3230 
3231 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3232 		nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
3233 		return -EAGAIN;
3234 	}
3235 	if (task->tk_status >= 0) {
3236 		renew_lease(NFS_SERVER(inode), data->timestamp);
3237 		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
3238 	}
3239 	return 0;
3240 }
3241 
3242 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3243 {
3244 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3245 		return -EAGAIN;
3246 	return data->write_done_cb(task, data);
3247 }
3248 
3249 /* Reset the nfs_write_data to send the write to the MDS. */
3250 void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
3251 {
3252 	dprintk("%s Reset task for i/o through\n", __func__);
3253 	put_lseg(data->lseg);
3254 	data->lseg          = NULL;
3255 	data->ds_clp        = NULL;
3256 	data->write_done_cb = nfs4_write_done_cb;
3257 	data->args.fh       = NFS_FH(data->inode);
3258 	data->args.bitmask  = data->res.server->cache_consistency_bitmask;
3259 	data->args.offset   = data->mds_offset;
3260 	data->res.fattr     = &data->fattr;
3261 	task->tk_ops        = data->mds_ops;
3262 	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3263 }
3264 EXPORT_SYMBOL_GPL(nfs4_reset_write);
3265 
3266 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3267 {
3268 	struct nfs_server *server = NFS_SERVER(data->inode);
3269 
3270 	if (data->lseg) {
3271 		data->args.bitmask = NULL;
3272 		data->res.fattr = NULL;
3273 	} else
3274 		data->args.bitmask = server->cache_consistency_bitmask;
3275 	if (!data->write_done_cb)
3276 		data->write_done_cb = nfs4_write_done_cb;
3277 	data->res.server = server;
3278 	data->timestamp   = jiffies;
3279 
3280 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3281 }
3282 
3283 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3284 {
3285 	struct inode *inode = data->inode;
3286 
3287 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3288 		nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
3289 		return -EAGAIN;
3290 	}
3291 	nfs_refresh_inode(inode, data->res.fattr);
3292 	return 0;
3293 }
3294 
3295 static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
3296 {
3297 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3298 		return -EAGAIN;
3299 	return data->write_done_cb(task, data);
3300 }
3301 
3302 static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
3303 {
3304 	struct nfs_server *server = NFS_SERVER(data->inode);
3305 
3306 	if (data->lseg) {
3307 		data->args.bitmask = NULL;
3308 		data->res.fattr = NULL;
3309 	} else
3310 		data->args.bitmask = server->cache_consistency_bitmask;
3311 	if (!data->write_done_cb)
3312 		data->write_done_cb = nfs4_commit_done_cb;
3313 	data->res.server = server;
3314 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3315 }
3316 
3317 struct nfs4_renewdata {
3318 	struct nfs_client	*client;
3319 	unsigned long		timestamp;
3320 };
3321 
3322 /*
3323  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3324  * standalone procedure for queueing an asynchronous RENEW.
3325  */
3326 static void nfs4_renew_release(void *calldata)
3327 {
3328 	struct nfs4_renewdata *data = calldata;
3329 	struct nfs_client *clp = data->client;
3330 
3331 	if (atomic_read(&clp->cl_count) > 1)
3332 		nfs4_schedule_state_renewal(clp);
3333 	nfs_put_client(clp);
3334 	kfree(data);
3335 }
3336 
3337 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3338 {
3339 	struct nfs4_renewdata *data = calldata;
3340 	struct nfs_client *clp = data->client;
3341 	unsigned long timestamp = data->timestamp;
3342 
3343 	if (task->tk_status < 0) {
3344 		/* Unless we're shutting down, schedule state recovery! */
3345 		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
3346 			nfs4_schedule_lease_recovery(clp);
3347 		return;
3348 	}
3349 	do_renew_lease(clp, timestamp);
3350 }
3351 
3352 static const struct rpc_call_ops nfs4_renew_ops = {
3353 	.rpc_call_done = nfs4_renew_done,
3354 	.rpc_release = nfs4_renew_release,
3355 };
3356 
3357 int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
3358 {
3359 	struct rpc_message msg = {
3360 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3361 		.rpc_argp	= clp,
3362 		.rpc_cred	= cred,
3363 	};
3364 	struct nfs4_renewdata *data;
3365 
3366 	if (!atomic_inc_not_zero(&clp->cl_count))
3367 		return -EIO;
3368 	data = kmalloc(sizeof(*data), GFP_KERNEL);
3369 	if (data == NULL)
3370 		return -ENOMEM;
3371 	data->client = clp;
3372 	data->timestamp = jiffies;
3373 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3374 			&nfs4_renew_ops, data);
3375 }
3376 
3377 int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3378 {
3379 	struct rpc_message msg = {
3380 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3381 		.rpc_argp	= clp,
3382 		.rpc_cred	= cred,
3383 	};
3384 	unsigned long now = jiffies;
3385 	int status;
3386 
3387 	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3388 	if (status < 0)
3389 		return status;
3390 	do_renew_lease(clp, now);
3391 	return 0;
3392 }
3393 
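/*
 * ACLs are only exposed if the server advertises the ACL attribute and
 * supports both ALLOW and DENY ACE types.
 */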
3394 static inline int nfs4_server_supports_acls(struct nfs_server *server)
3395 {
3396 	return (server->caps & NFS_CAP_ACLS)
3397 		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3398 		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3399 }
3400 
3401 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3402  * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3403  * the stack.
3404  */
3405 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3406 
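/*
 * Map an existing kernel buffer onto its backing pages so it can be
 * handed to the RPC layer without copying.  The buffer must not be
 * slab-backed; see buf_to_pages_noslab() below for that case.
 */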
3407 static void buf_to_pages(const void *buf, size_t buflen,
3408 		struct page **pages, unsigned int *pgbase)
3409 {
3410 	const void *p = buf;
3411 
3412 	*pgbase = offset_in_page(buf);
3413 	p -= *pgbase;
3414 	while (p < buf + buflen) {
3415 		*(pages++) = virt_to_page(p);
3416 		p += PAGE_CACHE_SIZE;
3417 	}
3418 }
3419 
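/*
 * Copy the buffer into freshly allocated pages so that slab-backed
 * memory is never handed to the RPC layer.  Returns the number of
 * pages allocated, or -ENOMEM on failure.
 */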
3420 static int buf_to_pages_noslab(const void *buf, size_t buflen,
3421 		struct page **pages, unsigned int *pgbase)
3422 {
3423 	struct page *newpage, **spages;
3424 	int rc = 0;
3425 	size_t len;
3426 	spages = pages;
3427 
3428 	do {
3429 		len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3430 		newpage = alloc_page(GFP_KERNEL);
3431 
3432 		if (newpage == NULL)
3433 			goto unwind;
3434 		memcpy(page_address(newpage), buf, len);
3435 		buf += len;
3436 		buflen -= len;
3437 		*pages++ = newpage;
3438 		rc++;
3439 	} while (buflen != 0);
3440 
3441 	return rc;
3442 
3443 unwind:
3444 	for (; rc > 0; rc--)
3445 		__free_page(spages[rc-1]);
3446 	return -ENOMEM;
3447 }
3448 
3449 struct nfs4_cached_acl {
3450 	int cached;
3451 	size_t len;
3452 	char data[0];
3453 };
3454 
3455 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3456 {
3457 	struct nfs_inode *nfsi = NFS_I(inode);
3458 
3459 	spin_lock(&inode->i_lock);
3460 	kfree(nfsi->nfs4_acl);
3461 	nfsi->nfs4_acl = acl;
3462 	spin_unlock(&inode->i_lock);
3463 }
3464 
3465 static void nfs4_zap_acl_attr(struct inode *inode)
3466 {
3467 	nfs4_set_cached_acl(inode, NULL);
3468 }
3469 
3470 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3471 {
3472 	struct nfs_inode *nfsi = NFS_I(inode);
3473 	struct nfs4_cached_acl *acl;
3474 	int ret = -ENOENT;
3475 
3476 	spin_lock(&inode->i_lock);
3477 	acl = nfsi->nfs4_acl;
3478 	if (acl == NULL)
3479 		goto out;
3480 	if (buf == NULL) /* user is just asking for length */
3481 		goto out_len;
3482 	if (acl->cached == 0)
3483 		goto out;
3484 	ret = -ERANGE; /* see getxattr(2) man page */
3485 	if (acl->len > buflen)
3486 		goto out;
3487 	memcpy(buf, acl->data, acl->len);
3488 out_len:
3489 	ret = acl->len;
3490 out:
3491 	spin_unlock(&inode->i_lock);
3492 	return ret;
3493 }
3494 
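/*
 * Cache the ACL returned by the server.  ACLs larger than PAGE_SIZE are
 * recorded by length only, so a later read has to go back to the server
 * for the data itself.
 */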
3495 static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
3496 {
3497 	struct nfs4_cached_acl *acl;
3498 
3499 	if (buf && acl_len <= PAGE_SIZE) {
3500 		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3501 		if (acl == NULL)
3502 			goto out;
3503 		acl->cached = 1;
3504 		memcpy(acl->data, buf, acl_len);
3505 	} else {
3506 		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3507 		if (acl == NULL)
3508 			goto out;
3509 		acl->cached = 0;
3510 	}
3511 	acl->len = acl_len;
3512 out:
3513 	nfs4_set_cached_acl(inode, acl);
3514 }
3515 
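/*
 * Fetch the ACL from the server and update the cached copy.  When the
 * caller's buffer is smaller than a page, a scratch page is used so that
 * a full page of ACL data can still be cached.
 */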
3516 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3517 {
3518 	struct page *pages[NFS4ACL_MAXPAGES];
3519 	struct nfs_getaclargs args = {
3520 		.fh = NFS_FH(inode),
3521 		.acl_pages = pages,
3522 		.acl_len = buflen,
3523 	};
3524 	struct nfs_getaclres res = {
3525 		.acl_len = buflen,
3526 	};
3527 	void *resp_buf;
3528 	struct rpc_message msg = {
3529 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3530 		.rpc_argp = &args,
3531 		.rpc_resp = &res,
3532 	};
3533 	struct page *localpage = NULL;
3534 	int ret;
3535 
3536 	if (buflen < PAGE_SIZE) {
3537 		/* As long as we're doing a round trip to the server anyway,
3538 		 * let's be prepared for a page of acl data. */
3539 		localpage = alloc_page(GFP_KERNEL);
3540 		if (localpage == NULL)
3541 			return -ENOMEM;
3542 		resp_buf = page_address(localpage);
3543 		args.acl_pages[0] = localpage;
3544 		args.acl_pgbase = 0;
3545 		args.acl_len = PAGE_SIZE;
3546 	} else {
3547 		resp_buf = buf;
3548 		buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
3549 	}
3550 	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3551 	if (ret)
3552 		goto out_free;
3553 	if (res.acl_len > args.acl_len)
3554 		nfs4_write_cached_acl(inode, NULL, res.acl_len);
3555 	else
3556 		nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
3557 	if (buf) {
3558 		ret = -ERANGE;
3559 		if (res.acl_len > buflen)
3560 			goto out_free;
3561 		if (localpage)
3562 			memcpy(buf, resp_buf, res.acl_len);
3563 	}
3564 	ret = res.acl_len;
3565 out_free:
3566 	if (localpage)
3567 		__free_page(localpage);
3568 	return ret;
3569 }
3570 
3571 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3572 {
3573 	struct nfs4_exception exception = { };
3574 	ssize_t ret;
3575 	do {
3576 		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3577 		if (ret >= 0)
3578 			break;
3579 		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3580 	} while (exception.retry);
3581 	return ret;
3582 }
3583 
3584 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3585 {
3586 	struct nfs_server *server = NFS_SERVER(inode);
3587 	int ret;
3588 
3589 	if (!nfs4_server_supports_acls(server))
3590 		return -EOPNOTSUPP;
3591 	ret = nfs_revalidate_inode(server, inode);
3592 	if (ret < 0)
3593 		return ret;
3594 	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3595 		nfs_zap_acl_cache(inode);
3596 	ret = nfs4_read_cached_acl(inode, buf, buflen);
3597 	if (ret != -ENOENT)
3598 		return ret;
3599 	return nfs4_get_acl_uncached(inode, buf, buflen);
3600 }
3601 
3602 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3603 {
3604 	struct nfs_server *server = NFS_SERVER(inode);
3605 	struct page *pages[NFS4ACL_MAXPAGES];
3606 	struct nfs_setaclargs arg = {
3607 		.fh		= NFS_FH(inode),
3608 		.acl_pages	= pages,
3609 		.acl_len	= buflen,
3610 	};
3611 	struct nfs_setaclres res;
3612 	struct rpc_message msg = {
3613 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3614 		.rpc_argp	= &arg,
3615 		.rpc_resp	= &res,
3616 	};
3617 	int ret, i;
3618 
3619 	if (!nfs4_server_supports_acls(server))
3620 		return -EOPNOTSUPP;
3621 	i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3622 	if (i < 0)
3623 		return i;
3624 	nfs_inode_return_delegation(inode);
3625 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3626 
3627 	/*
3628 	 * Free each page after tx, so the only ref left is
3629 	 * held by the network stack
3630 	 */
3631 	for (; i > 0; i--)
3632 		put_page(pages[i-1]);
3633 
3634 	/*
3635 	 * Acl update can result in inode attribute update.
3636 	 * so mark the attribute cache invalid.
3637 	 */
3638 	spin_lock(&inode->i_lock);
3639 	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
3640 	spin_unlock(&inode->i_lock);
3641 	nfs_access_zap_cache(inode);
3642 	nfs_zap_acl_cache(inode);
3643 	return ret;
3644 }
3645 
3646 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3647 {
3648 	struct nfs4_exception exception = { };
3649 	int err;
3650 	do {
3651 		err = nfs4_handle_exception(NFS_SERVER(inode),
3652 				__nfs4_proc_set_acl(inode, buf, buflen),
3653 				&exception);
3654 	} while (exception.retry);
3655 	return err;
3656 }
3657 
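/*
 * Handle NFSv4 errors for asynchronous RPCs: schedule the appropriate
 * stateid, lease or session recovery and return -EAGAIN if the caller
 * should restart the RPC, or 0 once the error has been mapped for
 * return to the upper layers.
 */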
3658 static int
3659 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3660 {
3661 	struct nfs_client *clp = server->nfs_client;
3662 
3663 	if (task->tk_status >= 0)
3664 		return 0;
3665 	switch (task->tk_status) {
3666 		case -NFS4ERR_ADMIN_REVOKED:
3667 		case -NFS4ERR_BAD_STATEID:
3668 		case -NFS4ERR_OPENMODE:
3669 			if (state == NULL)
3670 				break;
3671 			nfs4_schedule_stateid_recovery(server, state);
3672 			goto wait_on_recovery;
3673 		case -NFS4ERR_STALE_STATEID:
3674 		case -NFS4ERR_STALE_CLIENTID:
3675 		case -NFS4ERR_EXPIRED:
3676 			nfs4_schedule_lease_recovery(clp);
3677 			goto wait_on_recovery;
3678 #if defined(CONFIG_NFS_V4_1)
3679 		case -NFS4ERR_BADSESSION:
3680 		case -NFS4ERR_BADSLOT:
3681 		case -NFS4ERR_BAD_HIGH_SLOT:
3682 		case -NFS4ERR_DEADSESSION:
3683 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3684 		case -NFS4ERR_SEQ_FALSE_RETRY:
3685 		case -NFS4ERR_SEQ_MISORDERED:
3686 			dprintk("%s ERROR %d, Reset session\n", __func__,
3687 				task->tk_status);
3688 			nfs4_schedule_session_recovery(clp->cl_session);
3689 			task->tk_status = 0;
3690 			return -EAGAIN;
3691 #endif /* CONFIG_NFS_V4_1 */
3692 		case -NFS4ERR_DELAY:
3693 			nfs_inc_server_stats(server, NFSIOS_DELAY);
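			/* fall through */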
3694 		case -NFS4ERR_GRACE:
3695 		case -EKEYEXPIRED:
3696 			rpc_delay(task, NFS4_POLL_RETRY_MAX);
3697 			task->tk_status = 0;
3698 			return -EAGAIN;
3699 		case -NFS4ERR_RETRY_UNCACHED_REP:
3700 		case -NFS4ERR_OLD_STATEID:
3701 			task->tk_status = 0;
3702 			return -EAGAIN;
3703 	}
3704 	task->tk_status = nfs4_map_errors(task->tk_status);
3705 	return 0;
3706 wait_on_recovery:
3707 	rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3708 	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3709 		rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3710 	task->tk_status = 0;
3711 	return -EAGAIN;
3712 }
3713 
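/*
 * Negotiate a clientid with the server via SETCLIENTID.  On
 * NFS4ERR_CLID_INUSE the call is retried once after waiting a lease
 * period; if the name is still in use, the uniquifier is bumped for the
 * next attempt and the error is returned to the caller.
 */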
3714 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3715 		unsigned short port, struct rpc_cred *cred,
3716 		struct nfs4_setclientid_res *res)
3717 {
3718 	nfs4_verifier sc_verifier;
3719 	struct nfs4_setclientid setclientid = {
3720 		.sc_verifier = &sc_verifier,
3721 		.sc_prog = program,
3722 		.sc_cb_ident = clp->cl_cb_ident,
3723 	};
3724 	struct rpc_message msg = {
3725 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
3726 		.rpc_argp = &setclientid,
3727 		.rpc_resp = res,
3728 		.rpc_cred = cred,
3729 	};
3730 	__be32 *p;
3731 	int loop = 0;
3732 	int status;
3733 
3734 	p = (__be32*)sc_verifier.data;
3735 	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
3736 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
3737 
3738 	for (;;) {
3739 		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
3740 				sizeof(setclientid.sc_name), "%s/%s %s %s %u",
3741 				clp->cl_ipaddr,
3742 				rpc_peeraddr2str(clp->cl_rpcclient,
3743 							RPC_DISPLAY_ADDR),
3744 				rpc_peeraddr2str(clp->cl_rpcclient,
3745 							RPC_DISPLAY_PROTO),
3746 				clp->cl_rpcclient->cl_auth->au_ops->au_name,
3747 				clp->cl_id_uniquifier);
3748 		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
3749 				sizeof(setclientid.sc_netid),
3750 				rpc_peeraddr2str(clp->cl_rpcclient,
3751 							RPC_DISPLAY_NETID));
3752 		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
3753 				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
3754 				clp->cl_ipaddr, port >> 8, port & 255);
3755 
3756 		status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
3757 		if (status != -NFS4ERR_CLID_INUSE)
3758 			break;
3759 		if (loop != 0) {
3760 			++clp->cl_id_uniquifier;
3761 			break;
3762 		}
3763 		++loop;
3764 		ssleep(clp->cl_lease_time / HZ + 1);
3765 	}
3766 	return status;
3767 }
3768 
3769 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
3770 		struct nfs4_setclientid_res *arg,
3771 		struct rpc_cred *cred)
3772 {
3773 	struct nfs_fsinfo fsinfo;
3774 	struct rpc_message msg = {
3775 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
3776 		.rpc_argp = arg,
3777 		.rpc_resp = &fsinfo,
3778 		.rpc_cred = cred,
3779 	};
3780 	unsigned long now;
3781 	int status;
3782 
3783 	now = jiffies;
3784 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
3785 	if (status == 0) {
3786 		spin_lock(&clp->cl_lock);
3787 		clp->cl_lease_time = fsinfo.lease_time * HZ;
3788 		clp->cl_last_renewal = now;
3789 		spin_unlock(&clp->cl_lock);
3790 	}
3791 	return status;
3792 }
3793 
3794 struct nfs4_delegreturndata {
3795 	struct nfs4_delegreturnargs args;
3796 	struct nfs4_delegreturnres res;
3797 	struct nfs_fh fh;
3798 	nfs4_stateid stateid;
3799 	unsigned long timestamp;
3800 	struct nfs_fattr fattr;
3801 	int rpc_status;
3802 };
3803 
3804 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
3805 {
3806 	struct nfs4_delegreturndata *data = calldata;
3807 
3808 	if (!nfs4_sequence_done(task, &data->res.seq_res))
3809 		return;
3810 
3811 	switch (task->tk_status) {
3812 	case -NFS4ERR_STALE_STATEID:
3813 	case -NFS4ERR_EXPIRED:
3814 	case 0:
3815 		renew_lease(data->res.server, data->timestamp);
3816 		break;
3817 	default:
3818 		if (nfs4_async_handle_error(task, data->res.server, NULL) ==
3819 				-EAGAIN) {
3820 			nfs_restart_rpc(task, data->res.server->nfs_client);
3821 			return;
3822 		}
3823 	}
3824 	data->rpc_status = task->tk_status;
3825 }
3826 
3827 static void nfs4_delegreturn_release(void *calldata)
3828 {
3829 	kfree(calldata);
3830 }
3831 
3832 #if defined(CONFIG_NFS_V4_1)
3833 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
3834 {
3835 	struct nfs4_delegreturndata *d_data;
3836 
3837 	d_data = (struct nfs4_delegreturndata *)data;
3838 
3839 	if (nfs4_setup_sequence(d_data->res.server,
3840 				&d_data->args.seq_args,
3841 				&d_data->res.seq_res, 1, task))
3842 		return;
3843 	rpc_call_start(task);
3844 }
3845 #endif /* CONFIG_NFS_V4_1 */
3846 
3847 static const struct rpc_call_ops nfs4_delegreturn_ops = {
3848 #if defined(CONFIG_NFS_V4_1)
3849 	.rpc_call_prepare = nfs4_delegreturn_prepare,
3850 #endif /* CONFIG_NFS_V4_1 */
3851 	.rpc_call_done = nfs4_delegreturn_done,
3852 	.rpc_release = nfs4_delegreturn_release,
3853 };
3854 
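/*
 * Return a delegation to the server.  The DELEGRETURN is always sent
 * asynchronously; when @issync is set we wait for it to complete and
 * refresh the inode with the post-op attributes.
 */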
3855 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
3856 {
3857 	struct nfs4_delegreturndata *data;
3858 	struct nfs_server *server = NFS_SERVER(inode);
3859 	struct rpc_task *task;
3860 	struct rpc_message msg = {
3861 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
3862 		.rpc_cred = cred,
3863 	};
3864 	struct rpc_task_setup task_setup_data = {
3865 		.rpc_client = server->client,
3866 		.rpc_message = &msg,
3867 		.callback_ops = &nfs4_delegreturn_ops,
3868 		.flags = RPC_TASK_ASYNC,
3869 	};
3870 	int status = 0;
3871 
3872 	data = kzalloc(sizeof(*data), GFP_NOFS);
3873 	if (data == NULL)
3874 		return -ENOMEM;
3875 	data->args.fhandle = &data->fh;
3876 	data->args.stateid = &data->stateid;
3877 	data->args.bitmask = server->attr_bitmask;
3878 	nfs_copy_fh(&data->fh, NFS_FH(inode));
3879 	memcpy(&data->stateid, stateid, sizeof(data->stateid));
3880 	data->res.fattr = &data->fattr;
3881 	data->res.server = server;
3882 	nfs_fattr_init(data->res.fattr);
3883 	data->timestamp = jiffies;
3884 	data->rpc_status = 0;
3885 
3886 	task_setup_data.callback_data = data;
3887 	msg.rpc_argp = &data->args;
3888 	msg.rpc_resp = &data->res;
3889 	task = rpc_run_task(&task_setup_data);
3890 	if (IS_ERR(task))
3891 		return PTR_ERR(task);
3892 	if (!issync)
3893 		goto out;
3894 	status = nfs4_wait_for_completion_rpc_task(task);
3895 	if (status != 0)
3896 		goto out;
3897 	status = data->rpc_status;
3898 	if (status != 0)
3899 		goto out;
3900 	nfs_refresh_inode(inode, &data->fattr);
3901 out:
3902 	rpc_put_task(task);
3903 	return status;
3904 }
3905 
3906 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
3907 {
3908 	struct nfs_server *server = NFS_SERVER(inode);
3909 	struct nfs4_exception exception = { };
3910 	int err;
3911 	do {
3912 		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
3913 		switch (err) {
3914 			case -NFS4ERR_STALE_STATEID:
3915 			case -NFS4ERR_EXPIRED:
3916 			case 0:
3917 				return 0;
3918 		}
3919 		err = nfs4_handle_exception(server, err, &exception);
3920 	} while (exception.retry);
3921 	return err;
3922 }
3923 
3924 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
3925 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
3926 
3927 /*
3928  * sleep, with exponential backoff, and retry the LOCK operation.
3929  */
3930 static unsigned long
3931 nfs4_set_lock_task_retry(unsigned long timeout)
3932 {
3933 	schedule_timeout_killable(timeout);
3934 	timeout <<= 1;
3935 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
3936 		return NFS4_LOCK_MAXTIMEOUT;
3937 	return timeout;
3938 }
3939 
3940 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
3941 {
3942 	struct inode *inode = state->inode;
3943 	struct nfs_server *server = NFS_SERVER(inode);
3944 	struct nfs_client *clp = server->nfs_client;
3945 	struct nfs_lockt_args arg = {
3946 		.fh = NFS_FH(inode),
3947 		.fl = request,
3948 	};
3949 	struct nfs_lockt_res res = {
3950 		.denied = request,
3951 	};
3952 	struct rpc_message msg = {
3953 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
3954 		.rpc_argp       = &arg,
3955 		.rpc_resp       = &res,
3956 		.rpc_cred	= state->owner->so_cred,
3957 	};
3958 	struct nfs4_lock_state *lsp;
3959 	int status;
3960 
3961 	arg.lock_owner.clientid = clp->cl_clientid;
3962 	status = nfs4_set_lock_state(state, request);
3963 	if (status != 0)
3964 		goto out;
3965 	lsp = request->fl_u.nfs4_fl.owner;
3966 	arg.lock_owner.id = lsp->ls_id.id;
3967 	arg.lock_owner.s_dev = server->s_dev;
3968 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3969 	switch (status) {
3970 		case 0:
3971 			request->fl_type = F_UNLCK;
3972 			break;
3973 		case -NFS4ERR_DENIED:
3974 			status = 0;
3975 	}
3976 	request->fl_ops->fl_release_private(request);
3977 out:
3978 	return status;
3979 }
3980 
3981 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
3982 {
3983 	struct nfs4_exception exception = { };
3984 	int err;
3985 
3986 	do {
3987 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
3988 				_nfs4_proc_getlk(state, cmd, request),
3989 				&exception);
3990 	} while (exception.retry);
3991 	return err;
3992 }
3993 
3994 static int do_vfs_lock(struct file *file, struct file_lock *fl)
3995 {
3996 	int res = 0;
3997 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
3998 		case FL_POSIX:
3999 			res = posix_lock_file_wait(file, fl);
4000 			break;
4001 		case FL_FLOCK:
4002 			res = flock_lock_file_wait(file, fl);
4003 			break;
4004 		default:
4005 			BUG();
4006 	}
4007 	return res;
4008 }
4009 
4010 struct nfs4_unlockdata {
4011 	struct nfs_locku_args arg;
4012 	struct nfs_locku_res res;
4013 	struct nfs4_lock_state *lsp;
4014 	struct nfs_open_context *ctx;
4015 	struct file_lock fl;
4016 	const struct nfs_server *server;
4017 	unsigned long timestamp;
4018 };
4019 
4020 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4021 		struct nfs_open_context *ctx,
4022 		struct nfs4_lock_state *lsp,
4023 		struct nfs_seqid *seqid)
4024 {
4025 	struct nfs4_unlockdata *p;
4026 	struct inode *inode = lsp->ls_state->inode;
4027 
4028 	p = kzalloc(sizeof(*p), GFP_NOFS);
4029 	if (p == NULL)
4030 		return NULL;
4031 	p->arg.fh = NFS_FH(inode);
4032 	p->arg.fl = &p->fl;
4033 	p->arg.seqid = seqid;
4034 	p->res.seqid = seqid;
4035 	p->arg.stateid = &lsp->ls_stateid;
4036 	p->lsp = lsp;
4037 	atomic_inc(&lsp->ls_count);
4038 	/* Ensure we don't close file until we're done freeing locks! */
4039 	p->ctx = get_nfs_open_context(ctx);
4040 	memcpy(&p->fl, fl, sizeof(p->fl));
4041 	p->server = NFS_SERVER(inode);
4042 	return p;
4043 }
4044 
4045 static void nfs4_locku_release_calldata(void *data)
4046 {
4047 	struct nfs4_unlockdata *calldata = data;
4048 	nfs_free_seqid(calldata->arg.seqid);
4049 	nfs4_put_lock_state(calldata->lsp);
4050 	put_nfs_open_context(calldata->ctx);
4051 	kfree(calldata);
4052 }
4053 
4054 static void nfs4_locku_done(struct rpc_task *task, void *data)
4055 {
4056 	struct nfs4_unlockdata *calldata = data;
4057 
4058 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4059 		return;
4060 	switch (task->tk_status) {
4061 		case 0:
4062 			memcpy(calldata->lsp->ls_stateid.data,
4063 					calldata->res.stateid.data,
4064 					sizeof(calldata->lsp->ls_stateid.data));
4065 			renew_lease(calldata->server, calldata->timestamp);
4066 			break;
4067 		case -NFS4ERR_BAD_STATEID:
4068 		case -NFS4ERR_OLD_STATEID:
4069 		case -NFS4ERR_STALE_STATEID:
4070 		case -NFS4ERR_EXPIRED:
4071 			break;
4072 		default:
4073 			if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4074 				nfs_restart_rpc(task,
4075 						 calldata->server->nfs_client);
4076 	}
4077 }
4078 
4079 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4080 {
4081 	struct nfs4_unlockdata *calldata = data;
4082 
4083 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4084 		return;
4085 	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
4086 		/* Note: exit _without_ running nfs4_locku_done */
4087 		task->tk_action = NULL;
4088 		return;
4089 	}
4090 	calldata->timestamp = jiffies;
4091 	if (nfs4_setup_sequence(calldata->server,
4092 				&calldata->arg.seq_args,
4093 				&calldata->res.seq_res, 1, task))
4094 		return;
4095 	rpc_call_start(task);
4096 }
4097 
4098 static const struct rpc_call_ops nfs4_locku_ops = {
4099 	.rpc_call_prepare = nfs4_locku_prepare,
4100 	.rpc_call_done = nfs4_locku_done,
4101 	.rpc_release = nfs4_locku_release_calldata,
4102 };
4103 
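/*
 * Set up and launch an asynchronous LOCKU request.  On allocation
 * failure the seqid passed in is freed, so the caller only has to deal
 * with the returned rpc_task (or ERR_PTR).
 */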
4104 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4105 		struct nfs_open_context *ctx,
4106 		struct nfs4_lock_state *lsp,
4107 		struct nfs_seqid *seqid)
4108 {
4109 	struct nfs4_unlockdata *data;
4110 	struct rpc_message msg = {
4111 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4112 		.rpc_cred = ctx->cred,
4113 	};
4114 	struct rpc_task_setup task_setup_data = {
4115 		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4116 		.rpc_message = &msg,
4117 		.callback_ops = &nfs4_locku_ops,
4118 		.workqueue = nfsiod_workqueue,
4119 		.flags = RPC_TASK_ASYNC,
4120 	};
4121 
4122 	/* Ensure this is an unlock - when canceling a lock, the
4123 	 * canceled lock is passed in, and it won't be an unlock.
4124 	 */
4125 	fl->fl_type = F_UNLCK;
4126 
4127 	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4128 	if (data == NULL) {
4129 		nfs_free_seqid(seqid);
4130 		return ERR_PTR(-ENOMEM);
4131 	}
4132 
4133 	msg.rpc_argp = &data->arg;
4134 	msg.rpc_resp = &data->res;
4135 	task_setup_data.callback_data = data;
4136 	return rpc_run_task(&task_setup_data);
4137 }
4138 
4139 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4140 {
4141 	struct nfs_inode *nfsi = NFS_I(state->inode);
4142 	struct nfs_seqid *seqid;
4143 	struct nfs4_lock_state *lsp;
4144 	struct rpc_task *task;
4145 	int status = 0;
4146 	unsigned char fl_flags = request->fl_flags;
4147 
4148 	status = nfs4_set_lock_state(state, request);
4149 	/* Unlock _before_ we do the RPC call */
4150 	request->fl_flags |= FL_EXISTS;
4151 	down_read(&nfsi->rwsem);
4152 	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4153 		up_read(&nfsi->rwsem);
4154 		goto out;
4155 	}
4156 	up_read(&nfsi->rwsem);
4157 	if (status != 0)
4158 		goto out;
4159 	/* Is this a delegated lock? */
4160 	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4161 		goto out;
4162 	lsp = request->fl_u.nfs4_fl.owner;
4163 	seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4164 	status = -ENOMEM;
4165 	if (seqid == NULL)
4166 		goto out;
4167 	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4168 	status = PTR_ERR(task);
4169 	if (IS_ERR(task))
4170 		goto out;
4171 	status = nfs4_wait_for_completion_rpc_task(task);
4172 	rpc_put_task(task);
4173 out:
4174 	request->fl_flags = fl_flags;
4175 	return status;
4176 }
4177 
4178 struct nfs4_lockdata {
4179 	struct nfs_lock_args arg;
4180 	struct nfs_lock_res res;
4181 	struct nfs4_lock_state *lsp;
4182 	struct nfs_open_context *ctx;
4183 	struct file_lock fl;
4184 	unsigned long timestamp;
4185 	int rpc_status;
4186 	int cancelled;
4187 	struct nfs_server *server;
4188 };
4189 
4190 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4191 		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4192 		gfp_t gfp_mask)
4193 {
4194 	struct nfs4_lockdata *p;
4195 	struct inode *inode = lsp->ls_state->inode;
4196 	struct nfs_server *server = NFS_SERVER(inode);
4197 
4198 	p = kzalloc(sizeof(*p), gfp_mask);
4199 	if (p == NULL)
4200 		return NULL;
4201 
4202 	p->arg.fh = NFS_FH(inode);
4203 	p->arg.fl = &p->fl;
4204 	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4205 	if (p->arg.open_seqid == NULL)
4206 		goto out_free;
4207 	p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4208 	if (p->arg.lock_seqid == NULL)
4209 		goto out_free_seqid;
4210 	p->arg.lock_stateid = &lsp->ls_stateid;
4211 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4212 	p->arg.lock_owner.id = lsp->ls_id.id;
4213 	p->arg.lock_owner.s_dev = server->s_dev;
4214 	p->res.lock_seqid = p->arg.lock_seqid;
4215 	p->lsp = lsp;
4216 	p->server = server;
4217 	atomic_inc(&lsp->ls_count);
4218 	p->ctx = get_nfs_open_context(ctx);
4219 	memcpy(&p->fl, fl, sizeof(p->fl));
4220 	return p;
4221 out_free_seqid:
4222 	nfs_free_seqid(p->arg.open_seqid);
4223 out_free:
4224 	kfree(p);
4225 	return NULL;
4226 }
4227 
4228 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4229 {
4230 	struct nfs4_lockdata *data = calldata;
4231 	struct nfs4_state *state = data->lsp->ls_state;
4232 
4233 	dprintk("%s: begin!\n", __func__);
4234 	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4235 		return;
4236 	/* Do we need to do an open_to_lock_owner? */
4237 	if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4238 		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4239 			return;
4240 		data->arg.open_stateid = &state->stateid;
4241 		data->arg.new_lock_owner = 1;
4242 		data->res.open_seqid = data->arg.open_seqid;
4243 	} else
4244 		data->arg.new_lock_owner = 0;
4245 	data->timestamp = jiffies;
4246 	if (nfs4_setup_sequence(data->server,
4247 				&data->arg.seq_args,
4248 				&data->res.seq_res, 1, task))
4249 		return;
4250 	rpc_call_start(task);
4251 	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
4252 }
4253 
4254 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4255 {
4256 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4257 	nfs4_lock_prepare(task, calldata);
4258 }
4259 
4260 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4261 {
4262 	struct nfs4_lockdata *data = calldata;
4263 
4264 	dprintk("%s: begin!\n", __func__);
4265 
4266 	if (!nfs4_sequence_done(task, &data->res.seq_res))
4267 		return;
4268 
4269 	data->rpc_status = task->tk_status;
4270 	if (data->arg.new_lock_owner != 0) {
4271 		if (data->rpc_status == 0)
4272 			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4273 		else
4274 			goto out;
4275 	}
4276 	if (data->rpc_status == 0) {
4277 		memcpy(data->lsp->ls_stateid.data, data->res.stateid.data,
4278 					sizeof(data->lsp->ls_stateid.data));
4279 		data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
4280 		renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp);
4281 	}
4282 out:
4283 	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4284 }
4285 
4286 static void nfs4_lock_release(void *calldata)
4287 {
4288 	struct nfs4_lockdata *data = calldata;
4289 
4290 	dprintk("%s: begin!\n", __func__);
4291 	nfs_free_seqid(data->arg.open_seqid);
4292 	if (data->cancelled != 0) {
4293 		struct rpc_task *task;
4294 		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4295 				data->arg.lock_seqid);
4296 		if (!IS_ERR(task))
4297 			rpc_put_task_async(task);
4298 		dprintk("%s: cancelling lock!\n", __func__);
4299 	} else
4300 		nfs_free_seqid(data->arg.lock_seqid);
4301 	nfs4_put_lock_state(data->lsp);
4302 	put_nfs_open_context(data->ctx);
4303 	kfree(data);
4304 	dprintk("%s: done!\n", __func__);
4305 }
4306 
4307 static const struct rpc_call_ops nfs4_lock_ops = {
4308 	.rpc_call_prepare = nfs4_lock_prepare,
4309 	.rpc_call_done = nfs4_lock_done,
4310 	.rpc_release = nfs4_lock_release,
4311 };
4312 
4313 static const struct rpc_call_ops nfs4_recover_lock_ops = {
4314 	.rpc_call_prepare = nfs4_recover_lock_prepare,
4315 	.rpc_call_done = nfs4_lock_done,
4316 	.rpc_release = nfs4_lock_release,
4317 };
4318 
4319 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4320 {
4321 	switch (error) {
4322 	case -NFS4ERR_ADMIN_REVOKED:
4323 	case -NFS4ERR_BAD_STATEID:
4324 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4325 		if (new_lock_owner != 0 ||
4326 		   (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4327 			nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4328 		break;
4329 	case -NFS4ERR_STALE_STATEID:
4330 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
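		/* fall through */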
4331 	case -NFS4ERR_EXPIRED:
4332 		nfs4_schedule_lease_recovery(server->nfs_client);
4333 	}
4334 }
4335 
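/*
 * Issue a LOCK operation, either for a new lock or for reclaim/expired
 * recovery.  If the wait for completion is interrupted, the request is
 * marked cancelled so that the lock is released again from the task's
 * rpc_release callback.
 */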
4336 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4337 {
4338 	struct nfs4_lockdata *data;
4339 	struct rpc_task *task;
4340 	struct rpc_message msg = {
4341 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4342 		.rpc_cred = state->owner->so_cred,
4343 	};
4344 	struct rpc_task_setup task_setup_data = {
4345 		.rpc_client = NFS_CLIENT(state->inode),
4346 		.rpc_message = &msg,
4347 		.callback_ops = &nfs4_lock_ops,
4348 		.workqueue = nfsiod_workqueue,
4349 		.flags = RPC_TASK_ASYNC,
4350 	};
4351 	int ret;
4352 
4353 	dprintk("%s: begin!\n", __func__);
4354 	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4355 			fl->fl_u.nfs4_fl.owner,
4356 			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4357 	if (data == NULL)
4358 		return -ENOMEM;
4359 	if (IS_SETLKW(cmd))
4360 		data->arg.block = 1;
4361 	if (recovery_type > NFS_LOCK_NEW) {
4362 		if (recovery_type == NFS_LOCK_RECLAIM)
4363 			data->arg.reclaim = NFS_LOCK_RECLAIM;
4364 		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4365 	}
4366 	msg.rpc_argp = &data->arg;
4367 	msg.rpc_resp = &data->res;
4368 	task_setup_data.callback_data = data;
4369 	task = rpc_run_task(&task_setup_data);
4370 	if (IS_ERR(task))
4371 		return PTR_ERR(task);
4372 	ret = nfs4_wait_for_completion_rpc_task(task);
4373 	if (ret == 0) {
4374 		ret = data->rpc_status;
4375 		if (ret)
4376 			nfs4_handle_setlk_error(data->server, data->lsp,
4377 					data->arg.new_lock_owner, ret);
4378 	} else
4379 		data->cancelled = 1;
4380 	rpc_put_task(task);
4381 	dprintk("%s: done, ret = %d!\n", __func__, ret);
4382 	return ret;
4383 }
4384 
4385 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4386 {
4387 	struct nfs_server *server = NFS_SERVER(state->inode);
4388 	struct nfs4_exception exception = { };
4389 	int err;
4390 
4391 	do {
4392 		/* Cache the lock if possible... */
4393 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4394 			return 0;
4395 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4396 		if (err != -NFS4ERR_DELAY)
4397 			break;
4398 		nfs4_handle_exception(server, err, &exception);
4399 	} while (exception.retry);
4400 	return err;
4401 }
4402 
4403 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4404 {
4405 	struct nfs_server *server = NFS_SERVER(state->inode);
4406 	struct nfs4_exception exception = { };
4407 	int err;
4408 
4409 	err = nfs4_set_lock_state(state, request);
4410 	if (err != 0)
4411 		return err;
4412 	do {
4413 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4414 			return 0;
4415 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4416 		switch (err) {
4417 		default:
4418 			goto out;
4419 		case -NFS4ERR_GRACE:
4420 		case -NFS4ERR_DELAY:
4421 			nfs4_handle_exception(server, err, &exception);
4422 			err = 0;
4423 		}
4424 	} while (exception.retry);
4425 out:
4426 	return err;
4427 }
4428 
4429 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4430 {
4431 	struct nfs_inode *nfsi = NFS_I(state->inode);
4432 	unsigned char fl_flags = request->fl_flags;
4433 	int status = -ENOLCK;
4434 
4435 	if ((fl_flags & FL_POSIX) &&
4436 			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4437 		goto out;
4438 	/* Is this a delegated open? */
4439 	status = nfs4_set_lock_state(state, request);
4440 	if (status != 0)
4441 		goto out;
4442 	request->fl_flags |= FL_ACCESS;
4443 	status = do_vfs_lock(request->fl_file, request);
4444 	if (status < 0)
4445 		goto out;
4446 	down_read(&nfsi->rwsem);
4447 	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4448 		/* Yes: cache locks! */
4449 		/* ...but avoid races with delegation recall... */
4450 		request->fl_flags = fl_flags & ~FL_SLEEP;
4451 		status = do_vfs_lock(request->fl_file, request);
4452 		goto out_unlock;
4453 	}
4454 	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4455 	if (status != 0)
4456 		goto out_unlock;
4457 	/* Note: we always want to sleep here! */
4458 	request->fl_flags = fl_flags | FL_SLEEP;
4459 	if (do_vfs_lock(request->fl_file, request) < 0)
4460 		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
4461 out_unlock:
4462 	up_read(&nfsi->rwsem);
4463 out:
4464 	request->fl_flags = fl_flags;
4465 	return status;
4466 }
4467 
4468 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4469 {
4470 	struct nfs4_exception exception = { };
4471 	int err;
4472 
4473 	do {
4474 		err = _nfs4_proc_setlk(state, cmd, request);
4475 		if (err == -NFS4ERR_DENIED)
4476 			err = -EAGAIN;
4477 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
4478 				err, &exception);
4479 	} while (exception.retry);
4480 	return err;
4481 }
4482 
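/*
 * Entry point for the NFSv4 ->lock() file operation: dispatch GETLK,
 * UNLCK and SETLK requests, retrying SETLKW with exponential backoff
 * while the server keeps returning -EAGAIN (NFS4ERR_DENIED).
 */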
4483 static int
4484 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4485 {
4486 	struct nfs_open_context *ctx;
4487 	struct nfs4_state *state;
4488 	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4489 	int status;
4490 
4491 	/* verify open state */
4492 	ctx = nfs_file_open_context(filp);
4493 	state = ctx->state;
4494 
4495 	if (request->fl_start < 0 || request->fl_end < 0)
4496 		return -EINVAL;
4497 
4498 	if (IS_GETLK(cmd)) {
4499 		if (state != NULL)
4500 			return nfs4_proc_getlk(state, F_GETLK, request);
4501 		return 0;
4502 	}
4503 
4504 	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4505 		return -EINVAL;
4506 
4507 	if (request->fl_type == F_UNLCK) {
4508 		if (state != NULL)
4509 			return nfs4_proc_unlck(state, cmd, request);
4510 		return 0;
4511 	}
4512 
4513 	if (state == NULL)
4514 		return -ENOLCK;
4515 	do {
4516 		status = nfs4_proc_setlk(state, cmd, request);
4517 		if ((status != -EAGAIN) || IS_SETLK(cmd))
4518 			break;
4519 		timeout = nfs4_set_lock_task_retry(timeout);
4520 		status = -ERESTARTSYS;
4521 		if (signalled())
4522 			break;
4523 	} while (status < 0);
4524 	return status;
4525 }
4526 
4527 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4528 {
4529 	struct nfs_server *server = NFS_SERVER(state->inode);
4530 	struct nfs4_exception exception = { };
4531 	int err;
4532 
4533 	err = nfs4_set_lock_state(state, fl);
4534 	if (err != 0)
4535 		goto out;
4536 	do {
4537 		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4538 		switch (err) {
4539 			default:
4540 				printk(KERN_ERR "%s: unhandled error %d.\n",
4541 						__func__, err);
4542 			case 0:
4543 			case -ESTALE:
4544 				goto out;
4545 			case -NFS4ERR_EXPIRED:
4546 			case -NFS4ERR_STALE_CLIENTID:
4547 			case -NFS4ERR_STALE_STATEID:
4548 				nfs4_schedule_lease_recovery(server->nfs_client);
4549 				goto out;
4550 			case -NFS4ERR_BADSESSION:
4551 			case -NFS4ERR_BADSLOT:
4552 			case -NFS4ERR_BAD_HIGH_SLOT:
4553 			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4554 			case -NFS4ERR_DEADSESSION:
4555 				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
4556 				goto out;
4557 			case -ERESTARTSYS:
4558 				/*
4559 				 * The show must go on: exit, but mark the
4560 				 * stateid as needing recovery.
4561 				 */
4562 			case -NFS4ERR_ADMIN_REVOKED:
4563 			case -NFS4ERR_BAD_STATEID:
4564 			case -NFS4ERR_OPENMODE:
4565 				nfs4_schedule_stateid_recovery(server, state);
4566 				err = 0;
4567 				goto out;
4568 			case -EKEYEXPIRED:
4569 				/*
4570 				 * User RPCSEC_GSS context has expired.
4571 				 * We cannot recover this stateid now, so
4572 				 * skip it and allow recovery thread to
4573 				 * proceed.
4574 				 */
4575 				err = 0;
4576 				goto out;
4577 			case -ENOMEM:
4578 			case -NFS4ERR_DENIED:
4579 				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
4580 				err = 0;
4581 				goto out;
4582 			case -NFS4ERR_DELAY:
4583 				break;
4584 		}
4585 		err = nfs4_handle_exception(server, err, &exception);
4586 	} while (exception.retry);
4587 out:
4588 	return err;
4589 }
4590 
4591 static void nfs4_release_lockowner_release(void *calldata)
4592 {
4593 	kfree(calldata);
4594 }
4595 
4596 const struct rpc_call_ops nfs4_release_lockowner_ops = {
4597 	.rpc_release = nfs4_release_lockowner_release,
4598 };
4599 
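/*
 * RELEASE_LOCKOWNER only exists in NFSv4.0.  The call is fire-and-forget:
 * the argument structure is freed from the rpc_release callback once the
 * RPC completes.
 */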
4600 void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
4601 {
4602 	struct nfs_server *server = lsp->ls_state->owner->so_server;
4603 	struct nfs_release_lockowner_args *args;
4604 	struct rpc_message msg = {
4605 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
4606 	};
4607 
4608 	if (server->nfs_client->cl_mvops->minor_version != 0)
4609 		return;
4610 	args = kmalloc(sizeof(*args), GFP_NOFS);
4611 	if (!args)
4612 		return;
4613 	args->lock_owner.clientid = server->nfs_client->cl_clientid;
4614 	args->lock_owner.id = lsp->ls_id.id;
4615 	args->lock_owner.s_dev = server->s_dev;
4616 	msg.rpc_argp = args;
4617 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
4618 }
4619 
4620 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4621 
4622 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
4623 				   const void *buf, size_t buflen,
4624 				   int flags, int type)
4625 {
4626 	if (strcmp(key, "") != 0)
4627 		return -EINVAL;
4628 
4629 	return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
4630 }
4631 
4632 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
4633 				   void *buf, size_t buflen, int type)
4634 {
4635 	if (strcmp(key, "") != 0)
4636 		return -EINVAL;
4637 
4638 	return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
4639 }
4640 
4641 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4642 				       size_t list_len, const char *name,
4643 				       size_t name_len, int type)
4644 {
4645 	size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
4646 
4647 	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4648 		return 0;
4649 
4650 	if (list && len <= list_len)
4651 		memcpy(list, XATTR_NAME_NFSV4_ACL, len);
4652 	return len;
4653 }
4654 
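/*
 * A referral entry only carries fsid, fileid and fs_locations, so
 * synthesize a directory mode and link count to give the VFS a
 * sane-looking inode.
 */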
4655 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4656 {
4657 	if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) &&
4658 		(fattr->valid & NFS_ATTR_FATTR_FSID) &&
4659 		(fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
4660 		return;
4661 
4662 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4663 		NFS_ATTR_FATTR_NLINK;
4664 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4665 	fattr->nlink = 2;
4666 }
4667 
4668 int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4669 		struct nfs4_fs_locations *fs_locations, struct page *page)
4670 {
4671 	struct nfs_server *server = NFS_SERVER(dir);
4672 	u32 bitmask[2] = {
4673 		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
4674 		[1] = FATTR4_WORD1_MOUNTED_ON_FILEID,
4675 	};
4676 	struct nfs4_fs_locations_arg args = {
4677 		.dir_fh = NFS_FH(dir),
4678 		.name = name,
4679 		.page = page,
4680 		.bitmask = bitmask,
4681 	};
4682 	struct nfs4_fs_locations_res res = {
4683 		.fs_locations = fs_locations,
4684 	};
4685 	struct rpc_message msg = {
4686 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
4687 		.rpc_argp = &args,
4688 		.rpc_resp = &res,
4689 	};
4690 	int status;
4691 
4692 	dprintk("%s: start\n", __func__);
4693 	nfs_fattr_init(&fs_locations->fattr);
4694 	fs_locations->server = server;
4695 	fs_locations->nlocations = 0;
4696 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4697 	nfs_fixup_referral_attributes(&fs_locations->fattr);
4698 	dprintk("%s: returned status = %d\n", __func__, status);
4699 	return status;
4700 }
4701 
4702 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
4703 {
4704 	int status;
4705 	struct nfs4_secinfo_arg args = {
4706 		.dir_fh = NFS_FH(dir),
4707 		.name   = name,
4708 	};
4709 	struct nfs4_secinfo_res res = {
4710 		.flavors     = flavors,
4711 	};
4712 	struct rpc_message msg = {
4713 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
4714 		.rpc_argp = &args,
4715 		.rpc_resp = &res,
4716 	};
4717 
4718 	dprintk("NFS call  secinfo %s\n", name->name);
4719 	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4720 	dprintk("NFS reply  secinfo: %d\n", status);
4721 	return status;
4722 }
4723 
4724 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
4725 {
4726 	struct nfs4_exception exception = { };
4727 	int err;
4728 	do {
4729 		err = nfs4_handle_exception(NFS_SERVER(dir),
4730 				_nfs4_proc_secinfo(dir, name, flavors),
4731 				&exception);
4732 	} while (exception.retry);
4733 	return err;
4734 }
4735 
4736 #ifdef CONFIG_NFS_V4_1
4737 /*
4738  * Check the exchange flags returned by the server for invalid flags: flags
4739  * outside the allowed mask, both the PNFS and NON_PNFS flags set, or none
4740  * of the NON_PNFS, PNFS, or DS flags set.
4741  */
4742 static int nfs4_check_cl_exchange_flags(u32 flags)
4743 {
4744 	if (flags & ~EXCHGID4_FLAG_MASK_R)
4745 		goto out_inval;
4746 	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
4747 	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
4748 		goto out_inval;
4749 	if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
4750 		goto out_inval;
4751 	return NFS_OK;
4752 out_inval:
4753 	return -NFS4ERR_INVAL;
4754 }
4755 
4756 /*
4757  * nfs4_proc_exchange_id()
4758  *
4759  * Since the clientid has expired, all compounds using sessions
4760  * associated with the stale clientid will be returning
4761  * NFS4ERR_BADSESSION in the sequence operation, and will therefore
4762  * be in some phase of session reset.
4763  */
4764 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4765 {
4766 	nfs4_verifier verifier;
4767 	struct nfs41_exchange_id_args args = {
4768 		.client = clp,
4769 		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
4770 	};
4771 	struct nfs41_exchange_id_res res = {
4772 		.client = clp,
4773 	};
4774 	int status;
4775 	struct rpc_message msg = {
4776 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
4777 		.rpc_argp = &args,
4778 		.rpc_resp = &res,
4779 		.rpc_cred = cred,
4780 	};
4781 	__be32 *p;
4782 
4783 	dprintk("--> %s\n", __func__);
4784 	BUG_ON(clp == NULL);
4785 
4786 	p = (__be32 *)verifier.data;
4787 	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
4788 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
4789 	args.verifier = &verifier;
4790 
4791 	args.id_len = scnprintf(args.id, sizeof(args.id),
4792 				"%s/%s.%s/%u",
4793 				clp->cl_ipaddr,
4794 				init_utsname()->nodename,
4795 				init_utsname()->domainname,
4796 				clp->cl_rpcclient->cl_auth->au_flavor);
4797 
4798 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4799 	if (!status)
4800 		status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
4801 	dprintk("<-- %s status= %d\n", __func__, status);
4802 	return status;
4803 }
4804 
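/*
 * Bundles the GET_LEASE_TIME arguments, results and nfs_client so the
 * rpc_call_prepare/rpc_call_done callbacks below can reach them.
 */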
4805 struct nfs4_get_lease_time_data {
4806 	struct nfs4_get_lease_time_args *args;
4807 	struct nfs4_get_lease_time_res *res;
4808 	struct nfs_client *clp;
4809 };
4810 
4811 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
4812 					void *calldata)
4813 {
4814 	int ret;
4815 	struct nfs4_get_lease_time_data *data =
4816 			(struct nfs4_get_lease_time_data *)calldata;
4817 
4818 	dprintk("--> %s\n", __func__);
4819 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4820 	/* just set up the sequence; do not trigger session recovery,
4821 	   since we're invoked from within one */
4822 	ret = nfs41_setup_sequence(data->clp->cl_session,
4823 				   &data->args->la_seq_args,
4824 				   &data->res->lr_seq_res, 0, task);
4825 
4826 	BUG_ON(ret == -EAGAIN);
4827 	rpc_call_start(task);
4828 	dprintk("<-- %s\n", __func__);
4829 }
4830 
4831 /*
4832  * Called from nfs4_state_manager thread for session setup, so don't recover
4833  * from sequence operation or clientid errors.
4834  */
4835 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
4836 {
4837 	struct nfs4_get_lease_time_data *data =
4838 			(struct nfs4_get_lease_time_data *)calldata;
4839 
4840 	dprintk("--> %s\n", __func__);
4841 	if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
4842 		return;
4843 	switch (task->tk_status) {
4844 	case -NFS4ERR_DELAY:
4845 	case -NFS4ERR_GRACE:
4846 		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
4847 		rpc_delay(task, NFS4_POLL_RETRY_MIN);
4848 		task->tk_status = 0;
4849 		/* fall through */
4850 	case -NFS4ERR_RETRY_UNCACHED_REP:
4851 		nfs_restart_rpc(task, data->clp);
4852 		return;
4853 	}
4854 	dprintk("<-- %s\n", __func__);
4855 }
4856 
4857 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
4858 	.rpc_call_prepare = nfs4_get_lease_time_prepare,
4859 	.rpc_call_done = nfs4_get_lease_time_done,
4860 };
4861 
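/*
 * Synchronously run a GET_LEASE_TIME compound using the callback ops
 * above; the decoded result lands in the caller's fsinfo.
 */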
4862 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
4863 {
4864 	struct rpc_task *task;
4865 	struct nfs4_get_lease_time_args args;
4866 	struct nfs4_get_lease_time_res res = {
4867 		.lr_fsinfo = fsinfo,
4868 	};
4869 	struct nfs4_get_lease_time_data data = {
4870 		.args = &args,
4871 		.res = &res,
4872 		.clp = clp,
4873 	};
4874 	struct rpc_message msg = {
4875 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
4876 		.rpc_argp = &args,
4877 		.rpc_resp = &res,
4878 	};
4879 	struct rpc_task_setup task_setup = {
4880 		.rpc_client = clp->cl_rpcclient,
4881 		.rpc_message = &msg,
4882 		.callback_ops = &nfs4_get_lease_time_ops,
4883 		.callback_data = &data,
4884 		.flags = RPC_TASK_TIMEOUT,
4885 	};
4886 	int status;
4887 
4888 	dprintk("--> %s\n", __func__);
4889 	task = rpc_run_task(&task_setup);
4890 
4891 	if (IS_ERR(task))
4892 		status = PTR_ERR(task);
4893 	else {
4894 		status = task->tk_status;
4895 		rpc_put_task(task);
4896 	}
4897 	dprintk("<-- %s return %d\n", __func__, status);
4898 
4899 	return status;
4900 }
4901 
4902 /*
4903  * Reset a slot table
4904  */
4905 static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
4906 				 int ivalue)
4907 {
4908 	struct nfs4_slot *new = NULL;
4909 	int i;
4910 	int ret = 0;
4911 
4912 	dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
4913 		max_reqs, tbl->max_slots);
4914 
4915 	/* Does the newly negotiated max_reqs match the existing slot table? */
4916 	if (max_reqs != tbl->max_slots) {
4917 		ret = -ENOMEM;
4918 		new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
4919 			      GFP_NOFS);
4920 		if (!new)
4921 			goto out;
4922 		ret = 0;
4923 		kfree(tbl->slots);
4924 	}
4925 	spin_lock(&tbl->slot_tbl_lock);
4926 	if (new) {
4927 		tbl->slots = new;
4928 		tbl->max_slots = max_reqs;
4929 	}
4930 	for (i = 0; i < tbl->max_slots; ++i)
4931 		tbl->slots[i].seq_nr = ivalue;
4932 	spin_unlock(&tbl->slot_tbl_lock);
4933 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
4934 		tbl, tbl->slots, tbl->max_slots);
4935 out:
4936 	dprintk("<-- %s: return %d\n", __func__, ret);
4937 	return ret;
4938 }
4939 
4940 /*
4941  * Reset the forechannel and backchannel slot tables
4942  */
4943 static int nfs4_reset_slot_tables(struct nfs4_session *session)
4944 {
4945 	int status;
4946 
4947 	status = nfs4_reset_slot_table(&session->fc_slot_table,
4948 			session->fc_attrs.max_reqs, 1);
4949 	if (status)
4950 		return status;
4951 
4952 	status = nfs4_reset_slot_table(&session->bc_slot_table,
4953 			session->bc_attrs.max_reqs, 0);
4954 	return status;
4955 }
4956 
4957 /* Destroy the slot table */
4958 static void nfs4_destroy_slot_tables(struct nfs4_session *session)
4959 {
4960 	if (session->fc_slot_table.slots != NULL) {
4961 		kfree(session->fc_slot_table.slots);
4962 		session->fc_slot_table.slots = NULL;
4963 	}
4964 	if (session->bc_slot_table.slots != NULL) {
4965 		kfree(session->bc_slot_table.slots);
4966 		session->bc_slot_table.slots = NULL;
4967 	}
4968 	return;
4969 }
4970 
4971 /*
4972  * Initialize slot table
4973  */
4974 static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
4975 		int max_slots, int ivalue)
4976 {
4977 	struct nfs4_slot *slot;
4978 	int ret = -ENOMEM;
4979 
4980 	BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);
4981 
4982 	dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
4983 
4984 	slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS);
4985 	if (!slot)
4986 		goto out;
4987 	ret = 0;
4988 
4989 	spin_lock(&tbl->slot_tbl_lock);
4990 	tbl->max_slots = max_slots;
4991 	tbl->slots = slot;
4992 	tbl->highest_used_slotid = -1;  /* no slot is currently used */
4993 	spin_unlock(&tbl->slot_tbl_lock);
4994 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
4995 		tbl, tbl->slots, tbl->max_slots);
4996 out:
4997 	dprintk("<-- %s: return %d\n", __func__, ret);
4998 	return ret;
4999 }
5000 
5001 /*
5002  * Initialize the forechannel and backchannel tables
5003  */
5004 static int nfs4_init_slot_tables(struct nfs4_session *session)
5005 {
5006 	struct nfs4_slot_table *tbl;
5007 	int status = 0;
5008 
5009 	tbl = &session->fc_slot_table;
5010 	if (tbl->slots == NULL) {
5011 		status = nfs4_init_slot_table(tbl,
5012 				session->fc_attrs.max_reqs, 1);
5013 		if (status)
5014 			return status;
5015 	}
5016 
5017 	tbl = &session->bc_slot_table;
5018 	if (tbl->slots == NULL) {
5019 		status = nfs4_init_slot_table(tbl,
5020 				session->bc_attrs.max_reqs, 0);
5021 		if (status)
5022 			nfs4_destroy_slot_tables(session);
5023 	}
5024 
5025 	return status;
5026 }
5027 
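/*
 * Allocate a session and initialize its fore and back channel slot
 * tables; the session starts out in the NFS4_SESSION_INITING state.
 */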
5028 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5029 {
5030 	struct nfs4_session *session;
5031 	struct nfs4_slot_table *tbl;
5032 
5033 	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5034 	if (!session)
5035 		return NULL;
5036 
5037 	tbl = &session->fc_slot_table;
5038 	tbl->highest_used_slotid = -1;
5039 	spin_lock_init(&tbl->slot_tbl_lock);
5040 	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5041 	init_completion(&tbl->complete);
5042 
5043 	tbl = &session->bc_slot_table;
5044 	tbl->highest_used_slotid = -1;
5045 	spin_lock_init(&tbl->slot_tbl_lock);
5046 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5047 	init_completion(&tbl->complete);
5048 
5049 	session->session_state = 1<<NFS4_SESSION_INITING;
5050 
5051 	session->clp = clp;
5052 	return session;
5053 }
5054 
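/*
 * Tear down a session: send DESTROY_SESSION, release the backchannel
 * resources held by the transport, then free the slot tables and the
 * session itself.
 */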
5055 void nfs4_destroy_session(struct nfs4_session *session)
5056 {
5057 	nfs4_proc_destroy_session(session);
5058 	dprintk("%s Destroy backchannel for xprt %p\n",
5059 		__func__, session->clp->cl_rpcclient->cl_xprt);
5060 	xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
5061 				NFS41_BC_MIN_CALLBACKS);
5062 	nfs4_destroy_slot_tables(session);
5063 	kfree(session);
5064 }
5065 
5066 /*
5067  * Initialize the values to be used by the client in CREATE_SESSION.
5068  * If nfs4_init_session has already set the fore channel request and
5069  * response sizes, use them.
5070  *
5071  * Set the back channel max_resp_sz_cached to zero to force the client to
5072  * always set csa_cachethis to FALSE because the current implementation
5073  * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5074  */
5075 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5076 {
5077 	struct nfs4_session *session = args->client->cl_session;
5078 	unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
5079 		     mxresp_sz = session->fc_attrs.max_resp_sz;
5080 
5081 	if (mxrqst_sz == 0)
5082 		mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5083 	if (mxresp_sz == 0)
5084 		mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5085 	/* Fore channel attributes */
5086 	args->fc_attrs.headerpadsz = 0;
5087 	args->fc_attrs.max_rqst_sz = mxrqst_sz;
5088 	args->fc_attrs.max_resp_sz = mxresp_sz;
5089 	args->fc_attrs.max_ops = NFS4_MAX_OPS;
5090 	args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
5091 
5092 	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5093 		"max_ops=%u max_reqs=%u\n",
5094 		__func__,
5095 		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5096 		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5097 
5098 	/* Back channel attributes */
5099 	args->bc_attrs.headerpadsz = 0;
5100 	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5101 	args->bc_attrs.max_resp_sz = PAGE_SIZE;
5102 	args->bc_attrs.max_resp_sz_cached = 0;
5103 	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5104 	args->bc_attrs.max_reqs = 1;
5105 
5106 	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5107 		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5108 		__func__,
5109 		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5110 		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5111 		args->bc_attrs.max_reqs);
5112 }
5113 
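/*
 * Sanity-check the channel attributes the server returned in the
 * CREATE_SESSION reply against the values we requested.
 */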
5114 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5115 {
5116 	struct nfs4_channel_attrs *sent = &args->fc_attrs;
5117 	struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5118 
5119 	if (rcvd->headerpadsz > sent->headerpadsz)
5120 		return -EINVAL;
5121 	if (rcvd->max_resp_sz > sent->max_resp_sz)
5122 		return -EINVAL;
5123 	/*
5124 	 * Our requested max_ops is the minimum we need; we're not
5125 	 * prepared to break up compounds into smaller pieces than that.
5126 	 * So, no point even trying to continue if the server won't
5127 	 * cooperate:
5128 	 */
5129 	if (rcvd->max_ops < sent->max_ops)
5130 		return -EINVAL;
5131 	if (rcvd->max_reqs == 0)
5132 		return -EINVAL;
5133 	return 0;
5134 }
5135 
5136 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5137 {
5138 	struct nfs4_channel_attrs *sent = &args->bc_attrs;
5139 	struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5140 
5141 	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5142 		return -EINVAL;
5143 	if (rcvd->max_resp_sz < sent->max_resp_sz)
5144 		return -EINVAL;
5145 	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5146 		return -EINVAL;
5147 	/* These would render the backchannel useless: */
5148 	if (rcvd->max_ops  == 0)
5149 		return -EINVAL;
5150 	if (rcvd->max_reqs == 0)
5151 		return -EINVAL;
5152 	return 0;
5153 }
5154 
5155 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5156 				     struct nfs4_session *session)
5157 {
5158 	int ret;
5159 
5160 	ret = nfs4_verify_fore_channel_attrs(args, session);
5161 	if (ret)
5162 		return ret;
5163 	return nfs4_verify_back_channel_attrs(args, session);
5164 }
5165 
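/*
 * Send CREATE_SESSION with the negotiated channel attributes; on success
 * the CREATE_SESSION sequence id (clp->cl_seqid) is bumped for the next
 * attempt.
 */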
5166 static int _nfs4_proc_create_session(struct nfs_client *clp)
5167 {
5168 	struct nfs4_session *session = clp->cl_session;
5169 	struct nfs41_create_session_args args = {
5170 		.client = clp,
5171 		.cb_program = NFS4_CALLBACK,
5172 	};
5173 	struct nfs41_create_session_res res = {
5174 		.client = clp,
5175 	};
5176 	struct rpc_message msg = {
5177 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5178 		.rpc_argp = &args,
5179 		.rpc_resp = &res,
5180 	};
5181 	int status;
5182 
5183 	nfs4_init_channel_attrs(&args);
5184 	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5185 
5186 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5187 
5188 	if (!status)
5189 		/* Verify the session's negotiated channel_attrs values */
5190 		status = nfs4_verify_channel_attrs(&args, session);
5191 	if (!status) {
5192 		/* Increment the clientid slot sequence id */
5193 		clp->cl_seqid++;
5194 	}
5195 
5196 	return status;
5197 }
5198 
5199 /*
5200  * Issues a CREATE_SESSION operation to the server.
5201  * It is the responsibility of the caller to verify the session is
5202  * expired before calling this routine.
5203  */
5204 int nfs4_proc_create_session(struct nfs_client *clp)
5205 {
5206 	int status;
5207 	unsigned *ptr;
5208 	struct nfs4_session *session = clp->cl_session;
5209 
5210 	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5211 
5212 	status = _nfs4_proc_create_session(clp);
5213 	if (status)
5214 		goto out;
5215 
5216 	/* Init and reset the fore channel */
5217 	status = nfs4_init_slot_tables(session);
5218 	dprintk("slot table initialization returned %d\n", status);
5219 	if (status)
5220 		goto out;
5221 	status = nfs4_reset_slot_tables(session);
5222 	dprintk("slot table reset returned %d\n", status);
5223 	if (status)
5224 		goto out;
5225 
5226 	ptr = (unsigned *)&session->sess_id.data[0];
5227 	dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
5228 		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5229 out:
5230 	dprintk("<-- %s\n", __func__);
5231 	return status;
5232 }
5233 
5234 /*
5235  * Issue the over-the-wire RPC DESTROY_SESSION.
5236  * The caller must serialize access to this routine.
5237  */
5238 int nfs4_proc_destroy_session(struct nfs4_session *session)
5239 {
5240 	int status = 0;
5241 	struct rpc_message msg;
5242 
5243 	dprintk("--> nfs4_proc_destroy_session\n");
5244 
5245 	/* session is still being setup */
5246 	if (session->clp->cl_cons_state != NFS_CS_READY)
5247 		return status;
5248 
5249 	msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
5250 	msg.rpc_argp = session;
5251 	msg.rpc_resp = NULL;
5252 	msg.rpc_cred = NULL;
5253 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5254 
5255 	if (status)
5256 		printk(KERN_WARNING
5257 			"Got error %d from the server on DESTROY_SESSION. "
5258 			"Session has been destroyed regardless...\n", status);
5259 
5260 	dprintk("<-- nfs4_proc_destroy_session\n");
5261 	return status;
5262 }
5263 
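/*
 * Prime the fore channel request/response size limits from the server's
 * rsize/wsize (plus NFSv4.1 protocol overhead) before the session is
 * created, then make sure the client's lease has been established.
 */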
5264 int nfs4_init_session(struct nfs_server *server)
5265 {
5266 	struct nfs_client *clp = server->nfs_client;
5267 	struct nfs4_session *session;
5268 	unsigned int rsize, wsize;
5269 	int ret;
5270 
5271 	if (!nfs4_has_session(clp))
5272 		return 0;
5273 
5274 	session = clp->cl_session;
5275 	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5276 		return 0;
5277 
5278 	rsize = server->rsize;
5279 	if (rsize == 0)
5280 		rsize = NFS_MAX_FILE_IO_SIZE;
5281 	wsize = server->wsize;
5282 	if (wsize == 0)
5283 		wsize = NFS_MAX_FILE_IO_SIZE;
5284 
5285 	session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5286 	session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5287 
5288 	ret = nfs4_recover_expired_lease(server);
5289 	if (!ret)
5290 		ret = nfs4_check_client_ready(clp);
5291 	return ret;
5292 }
5293 
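/*
 * Session initialization for a pNFS data server connection: recover the
 * lease and fail with -ENODEV if the server does not advertise the DS
 * role.
 */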
5294 int nfs4_init_ds_session(struct nfs_client *clp)
5295 {
5296 	struct nfs4_session *session = clp->cl_session;
5297 	int ret;
5298 
5299 	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5300 		return 0;
5301 
5302 	ret = nfs4_client_recover_expired_lease(clp);
5303 	if (!ret)
5304 		/* Test for the DS role */
5305 		if (!is_ds_client(clp))
5306 			ret = -ENODEV;
5307 	if (!ret)
5308 		ret = nfs4_check_client_ready(clp);
5309 	return ret;
5310 
5311 }
5312 EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
5313 
5314 
5315 /*
5316  * Renew the cl_session lease.
5317  */
5318 struct nfs4_sequence_data {
5319 	struct nfs_client *clp;
5320 	struct nfs4_sequence_args args;
5321 	struct nfs4_sequence_res res;
5322 };
5323 
5324 static void nfs41_sequence_release(void *data)
5325 {
5326 	struct nfs4_sequence_data *calldata = data;
5327 	struct nfs_client *clp = calldata->clp;
5328 
5329 	if (atomic_read(&clp->cl_count) > 1)
5330 		nfs4_schedule_state_renewal(clp);
5331 	nfs_put_client(clp);
5332 	kfree(calldata);
5333 }
5334 
5335 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5336 {
5337 	switch(task->tk_status) {
5338 	case -NFS4ERR_DELAY:
5339 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
5340 		return -EAGAIN;
5341 	default:
5342 		nfs4_schedule_lease_recovery(clp);
5343 	}
5344 	return 0;
5345 }
5346 
5347 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5348 {
5349 	struct nfs4_sequence_data *calldata = data;
5350 	struct nfs_client *clp = calldata->clp;
5351 
5352 	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5353 		return;
5354 
5355 	if (task->tk_status < 0) {
5356 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
5357 		if (atomic_read(&clp->cl_count) == 1)
5358 			goto out;
5359 
5360 		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5361 			rpc_restart_call_prepare(task);
5362 			return;
5363 		}
5364 	}
5365 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5366 out:
5367 	dprintk("<-- %s\n", __func__);
5368 }
5369 
5370 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5371 {
5372 	struct nfs4_sequence_data *calldata = data;
5373 	struct nfs_client *clp = calldata->clp;
5374 	struct nfs4_sequence_args *args;
5375 	struct nfs4_sequence_res *res;
5376 
5377 	args = task->tk_msg.rpc_argp;
5378 	res = task->tk_msg.rpc_resp;
5379 
5380 	if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task))
5381 		return;
5382 	rpc_call_start(task);
5383 }
5384 
5385 static const struct rpc_call_ops nfs41_sequence_ops = {
5386 	.rpc_call_done = nfs41_sequence_call_done,
5387 	.rpc_call_prepare = nfs41_sequence_prepare,
5388 	.rpc_release = nfs41_sequence_release,
5389 };
5390 
5391 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5392 {
5393 	struct nfs4_sequence_data *calldata;
5394 	struct rpc_message msg = {
5395 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5396 		.rpc_cred = cred,
5397 	};
5398 	struct rpc_task_setup task_setup_data = {
5399 		.rpc_client = clp->cl_rpcclient,
5400 		.rpc_message = &msg,
5401 		.callback_ops = &nfs41_sequence_ops,
5402 		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5403 	};
5404 
5405 	if (!atomic_inc_not_zero(&clp->cl_count))
5406 		return ERR_PTR(-EIO);
5407 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5408 	if (calldata == NULL) {
5409 		nfs_put_client(clp);
5410 		return ERR_PTR(-ENOMEM);
5411 	}
5412 	msg.rpc_argp = &calldata->args;
5413 	msg.rpc_resp = &calldata->res;
5414 	calldata->clp = clp;
5415 	task_setup_data.callback_data = calldata;
5416 
5417 	return rpc_run_task(&task_setup_data);
5418 }
5419 
5420 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5421 {
5422 	struct rpc_task *task;
5423 	int ret = 0;
5424 
5425 	task = _nfs41_proc_sequence(clp, cred);
5426 	if (IS_ERR(task))
5427 		ret = PTR_ERR(task);
5428 	else
5429 		rpc_put_task_async(task);
5430 	dprintk("<-- %s status=%d\n", __func__, ret);
5431 	return ret;
5432 }
5433 
5434 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5435 {
5436 	struct rpc_task *task;
5437 	int ret;
5438 
5439 	task = _nfs41_proc_sequence(clp, cred);
5440 	if (IS_ERR(task)) {
5441 		ret = PTR_ERR(task);
5442 		goto out;
5443 	}
5444 	ret = rpc_wait_for_completion_task(task);
5445 	if (!ret) {
5446 		struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
5447 
5448 		if (task->tk_status == 0)
5449 			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
5450 		ret = task->tk_status;
5451 	}
5452 	rpc_put_task(task);
5453 out:
5454 	dprintk("<-- %s status=%d\n", __func__, ret);
5455 	return ret;
5456 }
5457 
5458 struct nfs4_reclaim_complete_data {
5459 	struct nfs_client *clp;
5460 	struct nfs41_reclaim_complete_args arg;
5461 	struct nfs41_reclaim_complete_res res;
5462 };
5463 
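/*
 * Like the other recovery-time compounds, RECLAIM_COMPLETE is prepared at
 * privileged RPC priority before claiming a session slot.
 */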
5464 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
5465 {
5466 	struct nfs4_reclaim_complete_data *calldata = data;
5467 
5468 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5469 	if (nfs41_setup_sequence(calldata->clp->cl_session,
5470 				&calldata->arg.seq_args,
5471 				&calldata->res.seq_res, 0, task))
5472 		return;
5473 
5474 	rpc_call_start(task);
5475 }
5476 
5477 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5478 {
5479 	switch(task->tk_status) {
5480 	case 0:
5481 	case -NFS4ERR_COMPLETE_ALREADY:
5482 	case -NFS4ERR_WRONG_CRED: /* What to do here? */
5483 		break;
5484 	case -NFS4ERR_DELAY:
5485 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
5486 		/* fall through */
5487 	case -NFS4ERR_RETRY_UNCACHED_REP:
5488 		return -EAGAIN;
5489 	default:
5490 		nfs4_schedule_lease_recovery(clp);
5491 	}
5492 	return 0;
5493 }
5494 
5495 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
5496 {
5497 	struct nfs4_reclaim_complete_data *calldata = data;
5498 	struct nfs_client *clp = calldata->clp;
5499 	struct nfs4_sequence_res *res = &calldata->res.seq_res;
5500 
5501 	dprintk("--> %s\n", __func__);
5502 	if (!nfs41_sequence_done(task, res))
5503 		return;
5504 
5505 	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
5506 		rpc_restart_call_prepare(task);
5507 		return;
5508 	}
5509 	dprintk("<-- %s\n", __func__);
5510 }
5511 
5512 static void nfs4_free_reclaim_complete_data(void *data)
5513 {
5514 	struct nfs4_reclaim_complete_data *calldata = data;
5515 
5516 	kfree(calldata);
5517 }
5518 
5519 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
5520 	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
5521 	.rpc_call_done = nfs4_reclaim_complete_done,
5522 	.rpc_release = nfs4_free_reclaim_complete_data,
5523 };
5524 
5525 /*
5526  * Issue a global reclaim complete.
5527  */
5528 static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
5529 {
5530 	struct nfs4_reclaim_complete_data *calldata;
5531 	struct rpc_task *task;
5532 	struct rpc_message msg = {
5533 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
5534 	};
5535 	struct rpc_task_setup task_setup_data = {
5536 		.rpc_client = clp->cl_rpcclient,
5537 		.rpc_message = &msg,
5538 		.callback_ops = &nfs4_reclaim_complete_call_ops,
5539 		.flags = RPC_TASK_ASYNC,
5540 	};
5541 	int status = -ENOMEM;
5542 
5543 	dprintk("--> %s\n", __func__);
5544 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5545 	if (calldata == NULL)
5546 		goto out;
5547 	calldata->clp = clp;
5548 	calldata->arg.one_fs = 0;
5549 
5550 	msg.rpc_argp = &calldata->arg;
5551 	msg.rpc_resp = &calldata->res;
5552 	task_setup_data.callback_data = calldata;
5553 	task = rpc_run_task(&task_setup_data);
5554 	if (IS_ERR(task)) {
5555 		status = PTR_ERR(task);
5556 		goto out;
5557 	}
5558 	status = nfs4_wait_for_completion_rpc_task(task);
5559 	if (status == 0)
5560 		status = task->tk_status;
5561 	rpc_put_task(task);
5562 	return 0;
5563 out:
5564 	dprintk("<-- %s status=%d\n", __func__, status);
5565 	return status;
5566 }
5567 
5568 static void
5569 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
5570 {
5571 	struct nfs4_layoutget *lgp = calldata;
5572 	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5573 
5574 	dprintk("--> %s\n", __func__);
5575 	/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
5576 	 * right now covering the LAYOUTGET we are about to send.
5577 	 * However, that is not so catastrophic, and there seems
5578 	 * to be no way to prevent it completely.
5579 	 */
5580 	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
5581 				&lgp->res.seq_res, 0, task))
5582 		return;
5583 	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
5584 					  NFS_I(lgp->args.inode)->layout,
5585 					  lgp->args.ctx->state)) {
5586 		rpc_exit(task, NFS4_OK);
5587 		return;
5588 	}
5589 	rpc_call_start(task);
5590 }
5591 
5592 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
5593 {
5594 	struct nfs4_layoutget *lgp = calldata;
5595 	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5596 
5597 	dprintk("--> %s\n", __func__);
5598 
5599 	if (!nfs4_sequence_done(task, &lgp->res.seq_res))
5600 		return;
5601 
5602 	switch (task->tk_status) {
5603 	case 0:
5604 		break;
5605 	case -NFS4ERR_LAYOUTTRYLATER:
5606 	case -NFS4ERR_RECALLCONFLICT:
5607 		task->tk_status = -NFS4ERR_DELAY;
5608 		/* Fall through */
5609 	default:
5610 		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
5611 			rpc_restart_call_prepare(task);
5612 			return;
5613 		}
5614 	}
5615 	dprintk("<-- %s\n", __func__);
5616 }
5617 
5618 static void nfs4_layoutget_release(void *calldata)
5619 {
5620 	struct nfs4_layoutget *lgp = calldata;
5621 
5622 	dprintk("--> %s\n", __func__);
5623 	put_nfs_open_context(lgp->args.ctx);
5624 	kfree(calldata);
5625 	dprintk("<-- %s\n", __func__);
5626 }
5627 
5628 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
5629 	.rpc_call_prepare = nfs4_layoutget_prepare,
5630 	.rpc_call_done = nfs4_layoutget_done,
5631 	.rpc_release = nfs4_layoutget_release,
5632 };
5633 
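/*
 * Run LAYOUTGET as an asynchronous RPC task, wait for it to complete, and
 * on success hand the returned layout to pnfs_layout_process().
 */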
5634 int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
5635 {
5636 	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5637 	struct rpc_task *task;
5638 	struct rpc_message msg = {
5639 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
5640 		.rpc_argp = &lgp->args,
5641 		.rpc_resp = &lgp->res,
5642 	};
5643 	struct rpc_task_setup task_setup_data = {
5644 		.rpc_client = server->client,
5645 		.rpc_message = &msg,
5646 		.callback_ops = &nfs4_layoutget_call_ops,
5647 		.callback_data = lgp,
5648 		.flags = RPC_TASK_ASYNC,
5649 	};
5650 	int status = 0;
5651 
5652 	dprintk("--> %s\n", __func__);
5653 
5654 	lgp->res.layoutp = &lgp->args.layout;
5655 	lgp->res.seq_res.sr_slot = NULL;
5656 	task = rpc_run_task(&task_setup_data);
5657 	if (IS_ERR(task))
5658 		return PTR_ERR(task);
5659 	status = nfs4_wait_for_completion_rpc_task(task);
5660 	if (status == 0)
5661 		status = task->tk_status;
5662 	if (status == 0)
5663 		status = pnfs_layout_process(lgp);
5664 	rpc_put_task(task);
5665 	dprintk("<-- %s status=%d\n", __func__, status);
5666 	return status;
5667 }
5668 
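/*
 * GETDEVICEINFO: fetch the pNFS device description for the device id in
 * 'pdev' from the server; nfs4_proc_getdeviceinfo() adds the usual
 * exception/retry loop.
 */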
5669 static int
5670 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
5671 {
5672 	struct nfs4_getdeviceinfo_args args = {
5673 		.pdev = pdev,
5674 	};
5675 	struct nfs4_getdeviceinfo_res res = {
5676 		.pdev = pdev,
5677 	};
5678 	struct rpc_message msg = {
5679 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
5680 		.rpc_argp = &args,
5681 		.rpc_resp = &res,
5682 	};
5683 	int status;
5684 
5685 	dprintk("--> %s\n", __func__);
5686 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5687 	dprintk("<-- %s status=%d\n", __func__, status);
5688 
5689 	return status;
5690 }
5691 
5692 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
5693 {
5694 	struct nfs4_exception exception = { };
5695 	int err;
5696 
5697 	do {
5698 		err = nfs4_handle_exception(server,
5699 					_nfs4_proc_getdeviceinfo(server, pdev),
5700 					&exception);
5701 	} while (exception.retry);
5702 	return err;
5703 }
5704 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
5705 
5706 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
5707 {
5708 	struct nfs4_layoutcommit_data *data = calldata;
5709 	struct nfs_server *server = NFS_SERVER(data->args.inode);
5710 
5711 	if (nfs4_setup_sequence(server, &data->args.seq_args,
5712 				&data->res.seq_res, 1, task))
5713 		return;
5714 	rpc_call_start(task);
5715 }
5716 
5717 static void
5718 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
5719 {
5720 	struct nfs4_layoutcommit_data *data = calldata;
5721 	struct nfs_server *server = NFS_SERVER(data->args.inode);
5722 
5723 	if (!nfs4_sequence_done(task, &data->res.seq_res))
5724 		return;
5725 
5726 	switch (task->tk_status) { /* Just ignore these failures */
5727 	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
5728 	case -NFS4ERR_BADIOMODE:     /* no IOMODE_RW layout for range */
5729 	case -NFS4ERR_BADLAYOUT:     /* no layout */
5730 	case -NFS4ERR_GRACE:	    /* loca_reclaim always false */
5731 		task->tk_status = 0;
5732 	}
5733 
5734 	if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
5735 		nfs_restart_rpc(task, server->nfs_client);
5736 		return;
5737 	}
5738 
5739 	if (task->tk_status == 0)
5740 		nfs_post_op_update_inode_force_wcc(data->args.inode,
5741 						   data->res.fattr);
5742 }
5743 
5744 static void nfs4_layoutcommit_release(void *calldata)
5745 {
5746 	struct nfs4_layoutcommit_data *data = calldata;
5747 
5748 	/* Matched by references in pnfs_set_layoutcommit */
5749 	put_lseg(data->lseg);
5750 	put_rpccred(data->cred);
5751 	kfree(data);
5752 }
5753 
5754 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
5755 	.rpc_call_prepare = nfs4_layoutcommit_prepare,
5756 	.rpc_call_done = nfs4_layoutcommit_done,
5757 	.rpc_release = nfs4_layoutcommit_release,
5758 };
5759 
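/*
 * LAYOUTCOMMIT: tell the MDS how far the client has written through the
 * layout (args.lastbytewritten) so it can update the file's size and
 * attributes; 'sync' selects whether we wait for the reply.
 */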
5760 int
5761 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
5762 {
5763 	struct rpc_message msg = {
5764 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
5765 		.rpc_argp = &data->args,
5766 		.rpc_resp = &data->res,
5767 		.rpc_cred = data->cred,
5768 	};
5769 	struct rpc_task_setup task_setup_data = {
5770 		.task = &data->task,
5771 		.rpc_client = NFS_CLIENT(data->args.inode),
5772 		.rpc_message = &msg,
5773 		.callback_ops = &nfs4_layoutcommit_ops,
5774 		.callback_data = data,
5775 		.flags = RPC_TASK_ASYNC,
5776 	};
5777 	struct rpc_task *task;
5778 	int status = 0;
5779 
5780 	dprintk("NFS: %4d initiating layoutcommit call. sync %d "
5781 		"lbw: %llu inode %lu\n",
5782 		data->task.tk_pid, sync,
5783 		data->args.lastbytewritten,
5784 		data->args.inode->i_ino);
5785 
5786 	task = rpc_run_task(&task_setup_data);
5787 	if (IS_ERR(task))
5788 		return PTR_ERR(task);
5789 	if (!sync)
5790 		goto out;
5791 	status = nfs4_wait_for_completion_rpc_task(task);
5792 	if (status != 0)
5793 		goto out;
5794 	status = task->tk_status;
5795 out:
5796 	dprintk("%s: status %d\n", __func__, status);
5797 	rpc_put_task(task);
5798 	return status;
5799 }
5800 #endif /* CONFIG_NFS_V4_1 */
5801 
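/*
 * Per-minor-version state recovery, no-grace recovery and lease renewal
 * operation vectors; nfs_v4_minor_ops[] below selects the appropriate set
 * for NFSv4.0 versus NFSv4.1.
 */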
5802 struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
5803 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
5804 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
5805 	.recover_open	= nfs4_open_reclaim,
5806 	.recover_lock	= nfs4_lock_reclaim,
5807 	.establish_clid = nfs4_init_clientid,
5808 	.get_clid_cred	= nfs4_get_setclientid_cred,
5809 };
5810 
5811 #if defined(CONFIG_NFS_V4_1)
5812 struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
5813 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
5814 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
5815 	.recover_open	= nfs4_open_reclaim,
5816 	.recover_lock	= nfs4_lock_reclaim,
5817 	.establish_clid = nfs41_init_clientid,
5818 	.get_clid_cred	= nfs4_get_exchange_id_cred,
5819 	.reclaim_complete = nfs41_proc_reclaim_complete,
5820 };
5821 #endif /* CONFIG_NFS_V4_1 */
5822 
5823 struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
5824 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
5825 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
5826 	.recover_open	= nfs4_open_expired,
5827 	.recover_lock	= nfs4_lock_expired,
5828 	.establish_clid = nfs4_init_clientid,
5829 	.get_clid_cred	= nfs4_get_setclientid_cred,
5830 };
5831 
5832 #if defined(CONFIG_NFS_V4_1)
5833 struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
5834 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
5835 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
5836 	.recover_open	= nfs4_open_expired,
5837 	.recover_lock	= nfs4_lock_expired,
5838 	.establish_clid = nfs41_init_clientid,
5839 	.get_clid_cred	= nfs4_get_exchange_id_cred,
5840 };
5841 #endif /* CONFIG_NFS_V4_1 */
5842 
5843 struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
5844 	.sched_state_renewal = nfs4_proc_async_renew,
5845 	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
5846 	.renew_lease = nfs4_proc_renew,
5847 };
5848 
5849 #if defined(CONFIG_NFS_V4_1)
5850 struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
5851 	.sched_state_renewal = nfs41_proc_async_sequence,
5852 	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
5853 	.renew_lease = nfs4_proc_sequence,
5854 };
5855 #endif
5856 
5857 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
5858 	.minor_version = 0,
5859 	.call_sync = _nfs4_call_sync,
5860 	.validate_stateid = nfs4_validate_delegation_stateid,
5861 	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
5862 	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
5863 	.state_renewal_ops = &nfs40_state_renewal_ops,
5864 };
5865 
5866 #if defined(CONFIG_NFS_V4_1)
5867 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
5868 	.minor_version = 1,
5869 	.call_sync = _nfs4_call_sync_session,
5870 	.validate_stateid = nfs41_validate_delegation_stateid,
5871 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
5872 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
5873 	.state_renewal_ops = &nfs41_state_renewal_ops,
5874 };
5875 #endif
5876 
5877 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
5878 	[0] = &nfs_v4_0_minor_ops,
5879 #if defined(CONFIG_NFS_V4_1)
5880 	[1] = &nfs_v4_1_minor_ops,
5881 #endif
5882 };
5883 
5884 static const struct inode_operations nfs4_file_inode_operations = {
5885 	.permission	= nfs_permission,
5886 	.getattr	= nfs_getattr,
5887 	.setattr	= nfs_setattr,
5888 	.getxattr	= generic_getxattr,
5889 	.setxattr	= generic_setxattr,
5890 	.listxattr	= generic_listxattr,
5891 	.removexattr	= generic_removexattr,
5892 };
5893 
5894 const struct nfs_rpc_ops nfs_v4_clientops = {
5895 	.version	= 4,			/* protocol version */
5896 	.dentry_ops	= &nfs4_dentry_operations,
5897 	.dir_inode_ops	= &nfs4_dir_inode_operations,
5898 	.file_inode_ops	= &nfs4_file_inode_operations,
5899 	.getroot	= nfs4_proc_get_root,
5900 	.getattr	= nfs4_proc_getattr,
5901 	.setattr	= nfs4_proc_setattr,
5902 	.lookupfh	= nfs4_proc_lookupfh,
5903 	.lookup		= nfs4_proc_lookup,
5904 	.access		= nfs4_proc_access,
5905 	.readlink	= nfs4_proc_readlink,
5906 	.create		= nfs4_proc_create,
5907 	.remove		= nfs4_proc_remove,
5908 	.unlink_setup	= nfs4_proc_unlink_setup,
5909 	.unlink_done	= nfs4_proc_unlink_done,
5910 	.rename		= nfs4_proc_rename,
5911 	.rename_setup	= nfs4_proc_rename_setup,
5912 	.rename_done	= nfs4_proc_rename_done,
5913 	.link		= nfs4_proc_link,
5914 	.symlink	= nfs4_proc_symlink,
5915 	.mkdir		= nfs4_proc_mkdir,
5916 	.rmdir		= nfs4_proc_remove,
5917 	.readdir	= nfs4_proc_readdir,
5918 	.mknod		= nfs4_proc_mknod,
5919 	.statfs		= nfs4_proc_statfs,
5920 	.fsinfo		= nfs4_proc_fsinfo,
5921 	.pathconf	= nfs4_proc_pathconf,
5922 	.set_capabilities = nfs4_server_capabilities,
5923 	.decode_dirent	= nfs4_decode_dirent,
5924 	.read_setup	= nfs4_proc_read_setup,
5925 	.read_done	= nfs4_read_done,
5926 	.write_setup	= nfs4_proc_write_setup,
5927 	.write_done	= nfs4_write_done,
5928 	.commit_setup	= nfs4_proc_commit_setup,
5929 	.commit_done	= nfs4_commit_done,
5930 	.lock		= nfs4_proc_lock,
5931 	.clear_acl_cache = nfs4_zap_acl_attr,
5932 	.close_context  = nfs4_close_context,
5933 	.open_context	= nfs4_atomic_open,
5934 	.init_client	= nfs4_init_client,
5935 	.secinfo	= nfs4_proc_secinfo,
5936 };
5937 
5938 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
5939 	.prefix	= XATTR_NAME_NFSV4_ACL,
5940 	.list	= nfs4_xattr_list_nfs4_acl,
5941 	.get	= nfs4_xattr_get_nfs4_acl,
5942 	.set	= nfs4_xattr_set_nfs4_acl,
5943 };
5944 
5945 const struct xattr_handler *nfs4_xattr_handlers[] = {
5946 	&nfs4_xattr_nfs4_acl_handler,
5947 	NULL
5948 };
5949 
5950 /*
5951  * Local variables:
5952  *  c-basic-offset: 8
5953  * End:
5954  */
5955