/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

#define NFS4_MAX_LOOP_ON_RECOVER (10)

static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
#endif
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
};

const u32 nfs4_statfs_bitmap[2] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[2] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
};

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	BUG_ON(readdir->count < 80);
	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here. We let '.'
	 * have cookie 0 and '..' have cookie 1. Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

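/*
 * Wait for the state manager to finish any recovery in progress on
 * this client.  Returns 0 once NFS4CLNT_MANAGER_RUNNING is clear, or a
 * negative value if the wait is interrupted by a fatal signal.
 */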
static int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
	int res;

	might_sleep();

	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	return res;
}

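/*
 * Sleep before retrying an NFSv4 request.  The delay is clamped to
 * [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX] and doubled on each call
 * (exponential backoff); -ERESTARTSYS is returned if a fatal signal
 * arrives while sleeping.
 */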
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	freezable_schedule_timeout_killable(*timeout);
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	*timeout <<= 1;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->retry = 0;
	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_OPENMODE:
		if (inode && nfs_have_delegation(inode, FMODE_READ)) {
			nfs_inode_return_delegation(inode);
			exception->retry = 1;
			return 0;
		}
		if (state == NULL)
			break;
		nfs4_schedule_stateid_recovery(server, state);
		goto wait_on_recovery;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode);
		nfs4_schedule_stateid_recovery(server, state);
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL)
			nfs4_schedule_stateid_recovery(server, state);
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR: %d Reset session\n", __func__,
			errorcode);
		nfs4_schedule_session_recovery(clp->cl_session);
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
	case -NFS4ERR_GRACE:
	case -NFS4ERR_DELAY:
		ret = nfs4_delay(server->client, &exception->timeout);
		if (ret != 0)
			break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (ret == 0)
		exception->retry = 1;
	return ret;
}


static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}

#if defined(CONFIG_NFS_V4_1)

/*
 * nfs4_free_slot - free a slot and efficiently update slot table.
 *
 * Freeing a slot is trivially done by clearing its respective bit
 * in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server would be able to size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none is found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock
 */
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
	BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
		if (slotid < tbl->max_slots)
			tbl->highest_used_slotid = slotid;
		else
			tbl->highest_used_slotid = NFS4_NO_SLOT;
	}
	dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
		slotid, tbl->highest_used_slotid);
}

bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	return true;
}

/*
 * Signal state manager thread if session fore channel is drained
 */
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
				nfs4_set_task_privileged, NULL);
		return;
	}

	if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;

	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
	complete(&ses->fc_slot_table.complete);
}

/*
 * Signal state manager thread if session back channel is drained
 */
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
	    ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;
	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
	complete(&ses->bc_slot_table.complete);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot_table *tbl;

	tbl = &res->sr_session->fc_slot_table;
	if (!res->sr_slot) {
		/* just wake up the next waiter, since we may not
		 * have consumed a slot after all */
		dprintk("%s: No slot\n", __func__);
		return;
	}

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
	nfs4_check_drain_fc_complete(res->sr_session);
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
}

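/*
 * Process the result of the SEQUENCE operation for an NFSv4.1 request:
 * bump the slot sequence number, renew the lease on success, trigger
 * lease recovery if the server flagged it, and free the slot.  Returns
 * 0 only when the call is being retried after NFS4ERR_DELAY.
 */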
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	unsigned long timestamp;
	struct nfs_client *clp;

	/*
	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation.
	 * Proceed as if the server received and processed the sequence
	 * operation.
	 */
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		break;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			__func__,
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		goto out_retry;
	default:
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return 1;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_session == NULL)
		return 1;
	return nfs41_sequence_done(task, res);
}

/*
 * nfs4_find_slot - efficiently look for a free slot
 *
 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and set up the sequence operation args accordingly.
 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
 *
 * Note: must be called while holding the slot_tbl_lock.
 */
static u32
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
	u32 slotid;
	u32 ret_id = NFS4_NO_SLOT;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slots);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid >= tbl->max_slots)
		goto out;
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
			tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	ret_id = slotid;
out:
	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
	return ret_id;
}

static void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_session = NULL;
	args->sa_cache_this = 0;
	if (cache_reply)
		args->sa_cache_this = 1;
	res->sr_session = NULL;
	res->sr_slot = NULL;
}

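/*
 * Reserve a session slot for an NFSv4.1 request and fill in the
 * SEQUENCE arguments.  The task is put to sleep on the slot table
 * waitqueue (and -EAGAIN returned) if the session is draining, if
 * other requests are already queued, or if no free slot is available.
 */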
int nfs41_setup_sequence(struct nfs4_session *session,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;
	u32 slotid;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		return 0;

	tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		/* The state manager will wait until the slot table is empty */
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s session is draining\n", __func__);
		return -EAGAIN;
	}

	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s enforce FIFO order\n", __func__);
		return -EAGAIN;
	}

	slotid = nfs4_find_slot(tbl);
	if (slotid == NFS4_NO_SLOT) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("<-- %s: no free slots\n", __func__);
		return -EAGAIN;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
	slot = tbl->slots + slotid;
	args->sa_session = session;
	args->sa_slotid = slotid;

	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);

	res->sr_session = session;
	res->sr_slot = slot;
	res->sr_renewal_time = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

int nfs4_setup_sequence(const struct nfs_server *server,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (session == NULL)
		goto out;

	dprintk("--> %s clp %p session %p sr_slot %td\n",
		__func__, session->clp, session, res->sr_slot ?
			res->sr_slot - session->fc_slot_table.slots : -1);

	ret = nfs41_setup_sequence(session, args, res, task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

struct nfs41_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
				data->seq_res, task))
		return;
	rpc_call_start(task);
}

static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs41_call_sync_prepare(task, calldata);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res,
				   int privileged)
{
	int ret;
	struct rpc_task *task;
	struct nfs41_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &nfs41_call_sync_ops,
		.callback_data = &data
	};

	if (privileged)
		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

int _nfs4_call_sync_session(struct rpc_clnt *clnt,
			    struct nfs_server *server,
			    struct rpc_message *msg,
			    struct nfs4_sequence_args *args,
			    struct nfs4_sequence_res *res,
			    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
}

#else
static inline
void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	return 1;
}
#endif /* CONFIG_NFS_V4_1 */

int _nfs4_call_sync(struct rpc_clnt *clnt,
		    struct nfs_server *server,
		    struct rpc_message *msg,
		    struct nfs4_sequence_args *args,
		    struct nfs4_sequence_res *res,
		    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return rpc_call_sync(clnt, msg, 0);
}

static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
						       args, res, cache_reply);
}

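/*
 * Update the directory's cached change attribute after an operation
 * that modified it, invalidating cached attributes and data.  A lookup
 * revalidation is forced if the change was not atomic or another
 * change slipped in between the before and after values.
 */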
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	spin_unlock(&dir->i_lock);
}

struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs_fattr f_attr;
	struct nfs_fattr dir_attr;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	int rpc_status;
	int cancelled;
};


static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.dir_attr = &p->dir_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init(&p->dir_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

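/*
 * Allocate and initialise the state needed to track a single OPEN
 * call, taking references to the dentry, its parent directory and the
 * state owner.  Returns NULL on allocation failure.
 */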
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;
	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
	if (p->o_arg.seqid == NULL)
		goto err_free;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.fh = NFS_FH(dir);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = server->attr_bitmask;
	p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
	if (attrs != NULL && attrs->ia_valid != 0) {
		__be32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;
err_free:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

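/*
 * Check whether an open can be satisfied from state we already hold,
 * i.e. the file is already open for a compatible mode and the caller
 * is not requesting O_EXCL or O_TRUNC semantics.
 */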
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
		return 0;
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}

static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}

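/*
 * Record a new open stateid and/or delegation stateid on the
 * nfs4_state.  Returns 1 if the state was updated, 0 otherwise.
 */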
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (nfsi->delegation != deleg_cur ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}

	return ret;
}


static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		if (can_open_cached(state, fmode, open_mode)) {
			spin_lock(&state->owner->so_lock);
			if (can_open_cached(state, fmode, open_mode)) {
				update_open_stateflags(state, fmode);
				spin_unlock(&state->owner->so_lock);
				goto out_return_state;
			}
			spin_unlock(&state->owner->so_lock);
		}
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
		if (ret != 0)
			goto out;
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}

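/*
 * Turn the result of an OPEN call into a referenced nfs4_state: look
 * up or create the inode, record any delegation the server returned,
 * and update the open stateid.  Falls back to a cached open if the
 * RPC never completed.
 */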
static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	struct nfs_delegation *delegation;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0) {
		struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
		int delegation_flags = 0;

		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(inode)->delegation);
		if (delegation)
			delegation_flags = delegation->flags;
		rcu_read_unlock();
		if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
			pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
					"returning a delegation for "
					"OPEN(CLAIM_DELEGATE_CUR)\n",
					clp->cl_hostname);
		} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
			nfs_inode_set_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
		else
			nfs_inode_reclaim_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
	}

	update_open_stateid(state, &data->o_res.stateid, NULL,
			data->o_arg.fmode);
	iput(inode);
out:
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}

static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}

static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
	struct nfs4_state *newstate;
	int ret;

	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	nfs4_close_state(newstate, fmode);
	*res = newstate;
	return 0;
}

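/*
 * Re-establish every open mode (read, write, read/write) that this
 * nfs4_state is counting, then resynchronise the current stateid with
 * the open stateid.  Shared by the reboot reclaim, delegation recall
 * and expired-lease recovery paths.
 */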
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	struct nfs4_state *newstate;
	int ret;

	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}

/*
 * OPEN_RECLAIM:
 *	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
	opendata->o_arg.fh = NFS_FH(state->inode);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs4_opendata *opendata;
	int ret;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	ret = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return ret;
}

int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs4_exception exception = { };
	struct nfs_server *server = NFS_SERVER(state->inode);
	int err;
	do {
		err = _nfs4_open_delegation_recall(ctx, state, stateid);
		switch (err) {
		case 0:
		case -ENOENT:
		case -ESTALE:
			goto out;
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
			nfs4_schedule_session_recovery(server->nfs_client->cl_session);
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_EXPIRED:
			/* Don't recall a delegation if it was lost */
			nfs4_schedule_lease_recovery(server->nfs_client);
			goto out;
		case -ERESTARTSYS:
			/*
			 * The show must go on: exit, but mark the
			 * stateid as needing recovery.
			 */
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			nfs_inode_find_state_and_recover(state->inode,
					stateid);
			nfs4_schedule_stateid_recovery(server, state);
		case -ENOMEM:
			err = 0;
			goto out;
		case -NFS4ERR_DELAY:
		case -NFS4ERR_GRACE:
			set_bit(NFS_DELEGATED_STATE, &state->flags);
			ssleep(1);
			err = -EAGAIN;
			goto out;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}

static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = 1;
	}
}

static void nfs4_open_confirm_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (!data->rpc_done)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_confirm_ops = {
	.rpc_call_done = nfs4_open_confirm_done,
	.rpc_release = nfs4_open_confirm_release,
};

/*
 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
 */
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
		.rpc_argp = &data->c_arg,
		.rpc_resp = &data->c_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_confirm_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->timestamp = jiffies;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);
	return status;
}

static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state_owner *sp = data->owner;

	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
		return;
	/*
	 * Check if we still need to send an OPEN call, or if we can use
	 * a delegation instead.
	 */
	if (data->state != NULL) {
		struct nfs_delegation *delegation;

		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
			goto out_no_action;
		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
		    can_open_delegated(delegation, data->o_arg.fmode))
			goto unlock_no_action;
		rcu_read_unlock();
	}
	/* Update client id. */
	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
	}
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->o_arg.server,
				&data->o_arg.seq_args,
				&data->o_res.seq_res,
				task) != 0)
		nfs_release_seqid(data->o_arg.seqid);
	else
		rpc_call_start(task);
	return;
unlock_no_action:
	rcu_read_unlock();
out_no_action:
	task->tk_action = NULL;

}

static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_open_prepare(task, calldata);
}

static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;

	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
		return;

	if (task->tk_status == 0) {
		switch (data->o_res.f_attr->mode & S_IFMT) {
		case S_IFREG:
			break;
		case S_IFLNK:
			data->rpc_status = -ELOOP;
			break;
		case S_IFDIR:
			data->rpc_status = -EISDIR;
			break;
		default:
			data->rpc_status = -ENOTDIR;
		}
		renew_lease(data->o_res.server, data->timestamp);
		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
			nfs_confirm_seqid(&data->owner->so_seqid, 0);
	}
	data->rpc_done = 1;
}

static void nfs4_open_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (data->rpc_status != 0 || !data->rpc_done)
		goto out_free;
	/* In case we need an open_confirm, no cleanup! */
	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_ops = {
	.rpc_call_prepare = nfs4_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};

static const struct rpc_call_ops nfs4_recover_open_ops = {
	.rpc_call_prepare = nfs4_recover_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};

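/*
 * Issue the OPEN compound as an asynchronous RPC and wait for it to
 * complete.  If the wait is interrupted the request is flagged as
 * cancelled so that the release callback can clean up any state the
 * server may still have established.
 */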
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
		.rpc_argp = o_arg,
		.rpc_resp = o_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->cancelled = 0;
	if (isrecover)
		task_setup_data.callback_ops = &nfs4_recover_open_ops;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);

	return status;
}

static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_openres *o_res = &data->o_res;
	int status;

	status = nfs4_run_open_task(data, 1);
	if (status != 0 || !data->rpc_done)
		return status;

	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);

	nfs_refresh_inode(dir, o_res->dir_attr);

	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
		if (status != 0)
			return status;
	}

	return status;
}

/*
 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
 */
static int _nfs4_proc_open(struct nfs4_opendata *data)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	int status;

	status = nfs4_run_open_task(data, 0);
	if (!data->rpc_done)
		return status;
	if (status != 0) {
		if (status == -NFS4ERR_BADNAME &&
		    !(o_arg->open_flags & O_CREAT))
			return -ENOENT;
		return status;
	}

	nfs_fattr_map_and_free_names(server, &data->f_attr);

	if (o_arg->open_flags & O_CREAT) {
		update_changeattr(dir, &o_res->cinfo);
		nfs_post_op_update_inode(dir, o_res->dir_attr);
	} else
		nfs_refresh_inode(dir, o_res->dir_attr);
	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
		server->caps &= ~NFS_CAP_POSIX_LOCK;
	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
		if (status != 0)
			return status;
	}
	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
	return 0;
}

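/*
 * Wait for any lease recovery to complete, kicking the state manager
 * if the lease is still marked expired.  Gives up with -EIO after
 * NFS4_MAX_LOOP_ON_RECOVER attempts.
 */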
nfs4_client_recover_expired_lease(struct nfs_client * clp)1666 static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1667 {
1668 unsigned int loop;
1669 int ret;
1670
1671 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1672 ret = nfs4_wait_clnt_recover(clp);
1673 if (ret != 0)
1674 break;
1675 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1676 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1677 break;
1678 nfs4_schedule_state_manager(clp);
1679 ret = -EIO;
1680 }
1681 return ret;
1682 }
1683
nfs4_recover_expired_lease(struct nfs_server * server)1684 static int nfs4_recover_expired_lease(struct nfs_server *server)
1685 {
1686 return nfs4_client_recover_expired_lease(server->nfs_client);
1687 }
1688
1689 /*
1690 * OPEN_EXPIRED:
1691 * reclaim state on the server after a network partition.
1692 * Assumes caller holds the appropriate lock
1693 */
_nfs4_open_expired(struct nfs_open_context * ctx,struct nfs4_state * state)1694 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1695 {
1696 struct nfs4_opendata *opendata;
1697 int ret;
1698
1699 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1700 if (IS_ERR(opendata))
1701 return PTR_ERR(opendata);
1702 ret = nfs4_open_recover(opendata, state);
1703 if (ret == -ESTALE)
1704 d_drop(ctx->dentry);
1705 nfs4_opendata_put(opendata);
1706 return ret;
1707 }
1708
1709 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1710 {
1711 struct nfs_server *server = NFS_SERVER(state->inode);
1712 struct nfs4_exception exception = { };
1713 int err;
1714
1715 do {
1716 err = _nfs4_open_expired(ctx, state);
1717 switch (err) {
1718 default:
1719 goto out;
1720 case -NFS4ERR_GRACE:
1721 case -NFS4ERR_DELAY:
1722 nfs4_handle_exception(server, err, &exception);
1723 err = 0;
1724 }
1725 } while (exception.retry);
1726 out:
1727 return err;
1728 }
1729
1730 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1731 {
1732 struct nfs_open_context *ctx;
1733 int ret;
1734
1735 ctx = nfs4_state_find_open_context(state);
1736 if (IS_ERR(ctx))
1737 return PTR_ERR(ctx);
1738 ret = nfs4_do_open_expired(ctx, state);
1739 put_nfs_open_context(ctx);
1740 return ret;
1741 }
1742
1743 #if defined(CONFIG_NFS_V4_1)
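/*
 * If any of the given state flags are set, ask the server whether the
 * stateid is still valid; if it is not, free it on the server and clear
 * those flags so the open state can be re-established.
 */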
1744 static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
1745 {
1746 int status = NFS_OK;
1747 struct nfs_server *server = NFS_SERVER(state->inode);
1748
1749 if (state->flags & flags) {
1750 status = nfs41_test_stateid(server, stateid);
1751 if (status != NFS_OK) {
1752 nfs41_free_stateid(server, stateid);
1753 state->flags &= ~flags;
1754 }
1755 }
1756 return status;
1757 }
1758
1759 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1760 {
1761 int deleg_status, open_status;
1762 int deleg_flags = 1 << NFS_DELEGATED_STATE;
1763 int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);
1764
1765 deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
1766 open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags);
1767
1768 if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
1769 return NFS_OK;
1770 return nfs4_open_expired(sp, state);
1771 }
1772 #endif
1773
1774 /*
1775  * On an EXCLUSIVE create, the server should send back a bitmask of the FATTR4_*
1776  * fields corresponding to the attributes it used to store the verifier.
1777  * Make sure we clobber those fields in the later setattr call.
1778 */
1779 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1780 {
1781 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1782 !(sattr->ia_valid & ATTR_ATIME_SET))
1783 sattr->ia_valid |= ATTR_ATIME;
1784
1785 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1786 !(sattr->ia_valid & ATTR_MTIME_SET))
1787 sattr->ia_valid |= ATTR_MTIME;
1788 }
1789
1790 /*
1791 * Returns a referenced nfs4_state
1792 */
1793 static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
1794 {
1795 struct nfs4_state_owner *sp;
1796 struct nfs4_state *state = NULL;
1797 struct nfs_server *server = NFS_SERVER(dir);
1798 struct nfs4_opendata *opendata;
1799 int status;
1800
1801 /* Protect against reboot recovery conflicts */
1802 status = -ENOMEM;
1803 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
1804 if (sp == NULL) {
1805 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1806 goto out_err;
1807 }
1808 status = nfs4_recover_expired_lease(server);
1809 if (status != 0)
1810 goto err_put_state_owner;
1811 if (dentry->d_inode != NULL)
1812 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
1813 status = -ENOMEM;
1814 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
1815 if (opendata == NULL)
1816 goto err_put_state_owner;
1817
1818 if (dentry->d_inode != NULL)
1819 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
1820
1821 status = _nfs4_proc_open(opendata);
1822 if (status != 0)
1823 goto err_opendata_put;
1824
1825 state = nfs4_opendata_to_nfs4_state(opendata);
1826 status = PTR_ERR(state);
1827 if (IS_ERR(state))
1828 goto err_opendata_put;
1829 if (server->caps & NFS_CAP_POSIX_LOCK)
1830 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1831
1832 if (opendata->o_arg.open_flags & O_EXCL) {
1833 nfs4_exclusive_attrset(opendata, sattr);
1834
1835 nfs_fattr_init(opendata->o_res.f_attr);
1836 status = nfs4_do_setattr(state->inode, cred,
1837 opendata->o_res.f_attr, sattr,
1838 state);
1839 if (status == 0)
1840 nfs_setattr_update_inode(state->inode, sattr);
1841 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1842 }
1843 nfs4_opendata_put(opendata);
1844 nfs4_put_state_owner(sp);
1845 *res = state;
1846 return 0;
1847 err_opendata_put:
1848 nfs4_opendata_put(opendata);
1849 err_put_state_owner:
1850 nfs4_put_state_owner(sp);
1851 out_err:
1852 *res = NULL;
1853 return status;
1854 }
1855
1856
1857 static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
1858 {
1859 struct nfs4_exception exception = { };
1860 struct nfs4_state *res;
1861 int status;
1862
1863 fmode &= FMODE_READ|FMODE_WRITE;
1864 do {
1865 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
1866 if (status == 0)
1867 break;
1868 /* NOTE: BAD_SEQID means the server and client disagree about the
1869 * book-keeping w.r.t. state-changing operations
1870 * (OPEN/CLOSE/LOCK/LOCKU...)
1871 * It is actually a sign of a bug on the client or on the server.
1872 *
1873 * If we receive a BAD_SEQID error in the particular case of
1874 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1875 * have unhashed the old state_owner for us, and that we can
1876 * therefore safely retry using a new one. We should still warn
1877 * the user though...
1878 */
1879 if (status == -NFS4ERR_BAD_SEQID) {
1880 pr_warn_ratelimited("NFS: v4 server %s "
1881 " returned a bad sequence-id error!\n",
1882 NFS_SERVER(dir)->nfs_client->cl_hostname);
1883 exception.retry = 1;
1884 continue;
1885 }
1886 /*
1887 * BAD_STATEID on OPEN means that the server cancelled our
1888 * state before it received the OPEN_CONFIRM.
1889 * Recover by retrying the request as per the discussion
1890 * on Page 181 of RFC3530.
1891 */
1892 if (status == -NFS4ERR_BAD_STATEID) {
1893 exception.retry = 1;
1894 continue;
1895 }
1896 if (status == -EAGAIN) {
1897 /* We must have found a delegation */
1898 exception.retry = 1;
1899 continue;
1900 }
1901 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
1902 status, &exception));
1903 } while (exception.retry);
1904 return res;
1905 }
1906
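/*
 * Issue a SETATTR.  The stateid is picked in order of preference: an open
 * stateid for the caller, a write delegation stateid, or the zero stateid.
 * A successful call made under an open stateid also renews the lease.
 */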
1907 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1908 struct nfs_fattr *fattr, struct iattr *sattr,
1909 struct nfs4_state *state)
1910 {
1911 struct nfs_server *server = NFS_SERVER(inode);
1912 struct nfs_setattrargs arg = {
1913 .fh = NFS_FH(inode),
1914 .iap = sattr,
1915 .server = server,
1916 .bitmask = server->attr_bitmask,
1917 };
1918 struct nfs_setattrres res = {
1919 .fattr = fattr,
1920 .server = server,
1921 };
1922 struct rpc_message msg = {
1923 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
1924 .rpc_argp = &arg,
1925 .rpc_resp = &res,
1926 .rpc_cred = cred,
1927 };
1928 unsigned long timestamp = jiffies;
1929 int status;
1930
1931 nfs_fattr_init(fattr);
1932
1933 if (state != NULL) {
1934 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
1935 current->files, current->tgid);
1936 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
1937 FMODE_WRITE)) {
1938 /* Use that stateid */
1939 } else
1940 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
1941
1942 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
1943 if (status == 0 && state != NULL)
1944 renew_lease(server, timestamp);
1945 return status;
1946 }
1947
1948 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1949 struct nfs_fattr *fattr, struct iattr *sattr,
1950 struct nfs4_state *state)
1951 {
1952 struct nfs_server *server = NFS_SERVER(inode);
1953 struct nfs4_exception exception = {
1954 .state = state,
1955 .inode = inode,
1956 };
1957 int err;
1958 do {
1959 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
1960 switch (err) {
1961 case -NFS4ERR_OPENMODE:
1962 if (state && !(state->state & FMODE_WRITE)) {
1963 err = -EBADF;
1964 if (sattr->ia_valid & ATTR_OPEN)
1965 err = -EACCES;
1966 goto out;
1967 }
1968 }
1969 err = nfs4_handle_exception(server, err, &exception);
1970 } while (exception.retry);
1971 out:
1972 return err;
1973 }
1974
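/* Book-keeping for an asynchronous CLOSE/OPEN_DOWNGRADE request */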
1975 struct nfs4_closedata {
1976 struct inode *inode;
1977 struct nfs4_state *state;
1978 struct nfs_closeargs arg;
1979 struct nfs_closeres res;
1980 struct nfs_fattr fattr;
1981 unsigned long timestamp;
1982 bool roc;
1983 u32 roc_barrier;
1984 };
1985
1986 static void nfs4_free_closedata(void *data)
1987 {
1988 struct nfs4_closedata *calldata = data;
1989 struct nfs4_state_owner *sp = calldata->state->owner;
1990 struct super_block *sb = calldata->state->inode->i_sb;
1991
1992 if (calldata->roc)
1993 pnfs_roc_release(calldata->state->inode);
1994 nfs4_put_open_state(calldata->state);
1995 nfs_free_seqid(calldata->arg.seqid);
1996 nfs4_put_state_owner(sp);
1997 nfs_sb_deactive(sb);
1998 kfree(calldata);
1999 }
2000
2001 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2002 fmode_t fmode)
2003 {
2004 spin_lock(&state->owner->so_lock);
2005 if (!(fmode & FMODE_READ))
2006 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2007 if (!(fmode & FMODE_WRITE))
2008 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2009 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2010 spin_unlock(&state->owner->so_lock);
2011 }
2012
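/*
 * Process the CLOSE/OPEN_DOWNGRADE reply: on success record the new open
 * stateid, renew the lease and clear the share-mode flags we gave up; a
 * stale or expired stateid on a full close is treated as already closed,
 * and anything else goes through the generic async error handler.
 */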
2013 static void nfs4_close_done(struct rpc_task *task, void *data)
2014 {
2015 struct nfs4_closedata *calldata = data;
2016 struct nfs4_state *state = calldata->state;
2017 struct nfs_server *server = NFS_SERVER(calldata->inode);
2018
2019 dprintk("%s: begin!\n", __func__);
2020 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2021 return;
2022 /* hmm. we are done with the inode, and in the process of freeing
2023 * the state_owner. we keep this around to process errors
2024 */
2025 switch (task->tk_status) {
2026 case 0:
2027 if (calldata->roc)
2028 pnfs_roc_set_barrier(state->inode,
2029 calldata->roc_barrier);
2030 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2031 renew_lease(server, calldata->timestamp);
2032 nfs4_close_clear_stateid_flags(state,
2033 calldata->arg.fmode);
2034 break;
2035 case -NFS4ERR_STALE_STATEID:
2036 case -NFS4ERR_OLD_STATEID:
2037 case -NFS4ERR_BAD_STATEID:
2038 case -NFS4ERR_EXPIRED:
2039 if (calldata->arg.fmode == 0)
2040 break;
2041 default:
2042 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2043 rpc_restart_call_prepare(task);
2044 }
2045 nfs_release_seqid(calldata->arg.seqid);
2046 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2047 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2048 }
2049
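/*
 * Decide what, if anything, needs to go on the wire: nothing if other
 * openers still need the current share modes, OPEN_DOWNGRADE if some
 * modes are still in use, or a full CLOSE once none are.
 */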
2050 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2051 {
2052 struct nfs4_closedata *calldata = data;
2053 struct nfs4_state *state = calldata->state;
2054 int call_close = 0;
2055
2056 dprintk("%s: begin!\n", __func__);
2057 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2058 return;
2059
2060 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2061 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2062 spin_lock(&state->owner->so_lock);
2063 /* Calculate the change in open mode */
2064 if (state->n_rdwr == 0) {
2065 if (state->n_rdonly == 0) {
2066 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2067 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2068 calldata->arg.fmode &= ~FMODE_READ;
2069 }
2070 if (state->n_wronly == 0) {
2071 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2072 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2073 calldata->arg.fmode &= ~FMODE_WRITE;
2074 }
2075 }
2076 spin_unlock(&state->owner->so_lock);
2077
2078 if (!call_close) {
2079 /* Note: exit _without_ calling nfs4_close_done */
2080 task->tk_action = NULL;
2081 goto out;
2082 }
2083
2084 if (calldata->arg.fmode == 0) {
2085 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2086 if (calldata->roc &&
2087 pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
2088 rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
2089 task, NULL);
2090 goto out;
2091 }
2092 }
2093
2094 nfs_fattr_init(calldata->res.fattr);
2095 calldata->timestamp = jiffies;
2096 if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
2097 &calldata->arg.seq_args,
2098 &calldata->res.seq_res,
2099 task) != 0)
2100 nfs_release_seqid(calldata->arg.seqid);
2101 else
2102 rpc_call_start(task);
2103 out:
2104 dprintk("%s: done!\n", __func__);
2105 }
2106
2107 static const struct rpc_call_ops nfs4_close_ops = {
2108 .rpc_call_prepare = nfs4_close_prepare,
2109 .rpc_call_done = nfs4_close_done,
2110 .rpc_release = nfs4_free_closedata,
2111 };
2112
2113 /*
2114 * It is possible for data to be read/written from a mem-mapped file
2115 * after the sys_close call (which hits the vfs layer as a flush).
2116 * This means that we can't safely call nfsv4 close on a file until
2117 * the inode is cleared. This in turn means that we are not good
2118 * NFSv4 citizens - we do not indicate to the server to update the file's
2119 * share state even when we are done with one of the three share
2120  * stateids in the inode.
2121 *
2122 * NOTE: Caller must be holding the sp->so_owner semaphore!
2123 */
2124 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
2125 {
2126 struct nfs_server *server = NFS_SERVER(state->inode);
2127 struct nfs4_closedata *calldata;
2128 struct nfs4_state_owner *sp = state->owner;
2129 struct rpc_task *task;
2130 struct rpc_message msg = {
2131 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2132 .rpc_cred = state->owner->so_cred,
2133 };
2134 struct rpc_task_setup task_setup_data = {
2135 .rpc_client = server->client,
2136 .rpc_message = &msg,
2137 .callback_ops = &nfs4_close_ops,
2138 .workqueue = nfsiod_workqueue,
2139 .flags = RPC_TASK_ASYNC,
2140 };
2141 int status = -ENOMEM;
2142
2143 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2144 if (calldata == NULL)
2145 goto out;
2146 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2147 calldata->inode = state->inode;
2148 calldata->state = state;
2149 calldata->arg.fh = NFS_FH(state->inode);
2150 calldata->arg.stateid = &state->open_stateid;
2151 /* Serialization for the sequence id */
2152 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2153 if (calldata->arg.seqid == NULL)
2154 goto out_free_calldata;
2155 calldata->arg.fmode = 0;
2156 calldata->arg.bitmask = server->cache_consistency_bitmask;
2157 calldata->res.fattr = &calldata->fattr;
2158 calldata->res.seqid = calldata->arg.seqid;
2159 calldata->res.server = server;
2160 calldata->roc = roc;
2161 nfs_sb_active(calldata->inode->i_sb);
2162
2163 msg.rpc_argp = &calldata->arg;
2164 msg.rpc_resp = &calldata->res;
2165 task_setup_data.callback_data = calldata;
2166 task = rpc_run_task(&task_setup_data);
2167 if (IS_ERR(task))
2168 return PTR_ERR(task);
2169 status = 0;
2170 if (wait)
2171 status = rpc_wait_for_completion_task(task);
2172 rpc_put_task(task);
2173 return status;
2174 out_free_calldata:
2175 kfree(calldata);
2176 out:
2177 if (roc)
2178 pnfs_roc_release(state->inode);
2179 nfs4_put_open_state(state);
2180 nfs4_put_state_owner(sp);
2181 return status;
2182 }
2183
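/*
 * Perform the NFSv4 OPEN for an atomic open request and attach the
 * resulting nfs4_state to the open context.
 */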
2184 static struct inode *
2185 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2186 {
2187 struct nfs4_state *state;
2188
2189 /* Protect against concurrent sillydeletes */
2190 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
2191 if (IS_ERR(state))
2192 return ERR_CAST(state);
2193 ctx->state = state;
2194 return igrab(state->inode);
2195 }
2196
2197 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2198 {
2199 if (ctx->state == NULL)
2200 return;
2201 if (is_sync)
2202 nfs4_close_sync(ctx->state, ctx->mode);
2203 else
2204 nfs4_close_state(ctx->state, ctx->mode);
2205 }
2206
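/*
 * Query the attributes supported by the server and translate them into
 * NFS_CAP_* flags, also recording the bitmasks later used for cache
 * consistency revalidation and the ACL support mask.
 */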
2207 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2208 {
2209 struct nfs4_server_caps_arg args = {
2210 .fhandle = fhandle,
2211 };
2212 struct nfs4_server_caps_res res = {};
2213 struct rpc_message msg = {
2214 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2215 .rpc_argp = &args,
2216 .rpc_resp = &res,
2217 };
2218 int status;
2219
2220 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2221 if (status == 0) {
2222 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2223 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2224 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2225 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2226 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2227 NFS_CAP_CTIME|NFS_CAP_MTIME);
2228 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2229 server->caps |= NFS_CAP_ACLS;
2230 if (res.has_links != 0)
2231 server->caps |= NFS_CAP_HARDLINKS;
2232 if (res.has_symlinks != 0)
2233 server->caps |= NFS_CAP_SYMLINKS;
2234 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2235 server->caps |= NFS_CAP_FILEID;
2236 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2237 server->caps |= NFS_CAP_MODE;
2238 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2239 server->caps |= NFS_CAP_NLINK;
2240 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2241 server->caps |= NFS_CAP_OWNER;
2242 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2243 server->caps |= NFS_CAP_OWNER_GROUP;
2244 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2245 server->caps |= NFS_CAP_ATIME;
2246 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2247 server->caps |= NFS_CAP_CTIME;
2248 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2249 server->caps |= NFS_CAP_MTIME;
2250
2251 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2252 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2253 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2254 server->acl_bitmask = res.acl_bitmask;
2255 server->fh_expire_type = res.fh_expire_type;
2256 }
2257
2258 return status;
2259 }
2260
2261 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2262 {
2263 struct nfs4_exception exception = { };
2264 int err;
2265 do {
2266 err = nfs4_handle_exception(server,
2267 _nfs4_server_capabilities(server, fhandle),
2268 &exception);
2269 } while (exception.retry);
2270 return err;
2271 }
2272
2273 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2274 struct nfs_fsinfo *info)
2275 {
2276 struct nfs4_lookup_root_arg args = {
2277 .bitmask = nfs4_fattr_bitmap,
2278 };
2279 struct nfs4_lookup_res res = {
2280 .server = server,
2281 .fattr = info->fattr,
2282 .fh = fhandle,
2283 };
2284 struct rpc_message msg = {
2285 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2286 .rpc_argp = &args,
2287 .rpc_resp = &res,
2288 };
2289
2290 nfs_fattr_init(info->fattr);
2291 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2292 }
2293
2294 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2295 struct nfs_fsinfo *info)
2296 {
2297 struct nfs4_exception exception = { };
2298 int err;
2299 do {
2300 err = _nfs4_lookup_root(server, fhandle, info);
2301 switch (err) {
2302 case 0:
2303 case -NFS4ERR_WRONGSEC:
2304 goto out;
2305 default:
2306 err = nfs4_handle_exception(server, err, &exception);
2307 }
2308 } while (exception.retry);
2309 out:
2310 return err;
2311 }
2312
2313 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2314 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2315 {
2316 struct rpc_auth *auth;
2317 int ret;
2318
2319 auth = rpcauth_create(flavor, server->client);
2320 if (!auth) {
2321 ret = -EIO;
2322 goto out;
2323 }
2324 ret = nfs4_lookup_root(server, fhandle, info);
2325 out:
2326 return ret;
2327 }
2328
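/*
 * Fallback used when the server rejects the security flavor of the root
 * lookup: walk the registered GSS pseudoflavors (ending with AUTH_NULL)
 * until one of them lets us look up the root filehandle.
 */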
2329 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2330 struct nfs_fsinfo *info)
2331 {
2332 int i, len, status = 0;
2333 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2334
2335 len = gss_mech_list_pseudoflavors(&flav_array[0]);
2336 flav_array[len] = RPC_AUTH_NULL;
2337 len += 1;
2338
2339 for (i = 0; i < len; i++) {
2340 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2341 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2342 continue;
2343 break;
2344 }
2345 /*
2346  * -EACCES could mean that the user doesn't have correct permissions
2347 * to access the mount. It could also mean that we tried to mount
2348 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
2349 * existing mount programs don't handle -EACCES very well so it should
2350 * be mapped to -EPERM instead.
2351 */
2352 if (status == -EACCES)
2353 status = -EPERM;
2354 return status;
2355 }
2356
2357 /*
2358 * get the file handle for the "/" directory on the server
2359 */
2360 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2361 struct nfs_fsinfo *info)
2362 {
2363 int minor_version = server->nfs_client->cl_minorversion;
2364 int status = nfs4_lookup_root(server, fhandle, info);
2365 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2366 /*
2367 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2368 * by nfs4_map_errors() as this function exits.
2369 */
2370 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2371 if (status == 0)
2372 status = nfs4_server_capabilities(server, fhandle);
2373 if (status == 0)
2374 status = nfs4_do_fsinfo(server, fhandle, info);
2375 return nfs4_map_errors(status);
2376 }
2377
2378 /*
2379 * Get locations and (maybe) other attributes of a referral.
2380 * Note that we'll actually follow the referral later when
2381 * we detect fsid mismatch in inode revalidation
2382 */
2383 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2384 const struct qstr *name, struct nfs_fattr *fattr,
2385 struct nfs_fh *fhandle)
2386 {
2387 int status = -ENOMEM;
2388 struct page *page = NULL;
2389 struct nfs4_fs_locations *locations = NULL;
2390
2391 page = alloc_page(GFP_KERNEL);
2392 if (page == NULL)
2393 goto out;
2394 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2395 if (locations == NULL)
2396 goto out;
2397
2398 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2399 if (status != 0)
2400 goto out;
2401 /* Make sure server returned a different fsid for the referral */
2402 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2403 dprintk("%s: server did not return a different fsid for"
2404 " a referral at %s\n", __func__, name->name);
2405 status = -EIO;
2406 goto out;
2407 }
2408 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2409 nfs_fixup_referral_attributes(&locations->fattr);
2410
2411 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2412 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2413 memset(fhandle, 0, sizeof(struct nfs_fh));
2414 out:
2415 if (page)
2416 __free_page(page);
2417 kfree(locations);
2418 return status;
2419 }
2420
2421 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2422 {
2423 struct nfs4_getattr_arg args = {
2424 .fh = fhandle,
2425 .bitmask = server->attr_bitmask,
2426 };
2427 struct nfs4_getattr_res res = {
2428 .fattr = fattr,
2429 .server = server,
2430 };
2431 struct rpc_message msg = {
2432 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2433 .rpc_argp = &args,
2434 .rpc_resp = &res,
2435 };
2436
2437 nfs_fattr_init(fattr);
2438 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2439 }
2440
2441 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2442 {
2443 struct nfs4_exception exception = { };
2444 int err;
2445 do {
2446 err = nfs4_handle_exception(server,
2447 _nfs4_proc_getattr(server, fhandle, fattr),
2448 &exception);
2449 } while (exception.retry);
2450 return err;
2451 }
2452
2453 /*
2454  * The file is not closed if it is opened due to a request to change
2455 * the size of the file. The open call will not be needed once the
2456 * VFS layer lookup-intents are implemented.
2457 *
2458 * Close is called when the inode is destroyed.
2459 * If we haven't opened the file for O_WRONLY, we
2460  * need to do so in the size_change case in order to obtain a stateid.
2461 *
2462 * Got race?
2463 * Because OPEN is always done by name in nfsv4, it is
2464 * possible that we opened a different file by the same
2465 * name. We can recognize this race condition, but we
2466 * can't do anything about it besides returning an error.
2467 *
2468 * This will be fixed with VFS changes (lookup-intent).
2469 */
2470 static int
2471 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2472 struct iattr *sattr)
2473 {
2474 struct inode *inode = dentry->d_inode;
2475 struct rpc_cred *cred = NULL;
2476 struct nfs4_state *state = NULL;
2477 int status;
2478
2479 if (pnfs_ld_layoutret_on_setattr(inode))
2480 pnfs_return_layout(inode);
2481
2482 nfs_fattr_init(fattr);
2483
2484 /* Search for an existing open(O_WRITE) file */
2485 if (sattr->ia_valid & ATTR_FILE) {
2486 struct nfs_open_context *ctx;
2487
2488 ctx = nfs_file_open_context(sattr->ia_file);
2489 if (ctx) {
2490 cred = ctx->cred;
2491 state = ctx->state;
2492 }
2493 }
2494
2495 /* Deal with open(O_TRUNC) */
2496 if (sattr->ia_valid & ATTR_OPEN)
2497 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2498
2499 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2500 if (status == 0)
2501 nfs_setattr_update_inode(inode, sattr);
2502 return status;
2503 }
2504
2505 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2506 const struct qstr *name, struct nfs_fh *fhandle,
2507 struct nfs_fattr *fattr)
2508 {
2509 struct nfs_server *server = NFS_SERVER(dir);
2510 int status;
2511 struct nfs4_lookup_arg args = {
2512 .bitmask = server->attr_bitmask,
2513 .dir_fh = NFS_FH(dir),
2514 .name = name,
2515 };
2516 struct nfs4_lookup_res res = {
2517 .server = server,
2518 .fattr = fattr,
2519 .fh = fhandle,
2520 };
2521 struct rpc_message msg = {
2522 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2523 .rpc_argp = &args,
2524 .rpc_resp = &res,
2525 };
2526
2527 nfs_fattr_init(fattr);
2528
2529 dprintk("NFS call lookup %s\n", name->name);
2530 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2531 dprintk("NFS reply lookup: %d\n", status);
2532 return status;
2533 }
2534
2535 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2536 {
2537 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2538 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2539 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2540 fattr->nlink = 2;
2541 }
2542
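/*
 * Common LOOKUP path: map NFS4ERR_BADNAME to ENOENT, chase referrals on
 * NFS4ERR_MOVED, and on NFS4ERR_WRONGSEC retry once with a client
 * negotiated via SECINFO, handing that client back to the caller on
 * success.
 */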
2543 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2544 struct qstr *name, struct nfs_fh *fhandle,
2545 struct nfs_fattr *fattr)
2546 {
2547 struct nfs4_exception exception = { };
2548 struct rpc_clnt *client = *clnt;
2549 int err;
2550 do {
2551 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2552 switch (err) {
2553 case -NFS4ERR_BADNAME:
2554 err = -ENOENT;
2555 goto out;
2556 case -NFS4ERR_MOVED:
2557 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2558 goto out;
2559 case -NFS4ERR_WRONGSEC:
2560 err = -EPERM;
2561 if (client != *clnt)
2562 goto out;
2563
2564 client = nfs4_create_sec_client(client, dir, name);
2565 if (IS_ERR(client))
2566 return PTR_ERR(client);
2567
2568 exception.retry = 1;
2569 break;
2570 default:
2571 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2572 }
2573 } while (exception.retry);
2574
2575 out:
2576 if (err == 0)
2577 *clnt = client;
2578 else if (client != *clnt)
2579 rpc_shutdown_client(client);
2580
2581 return err;
2582 }
2583
2584 static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
2585 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2586 {
2587 int status;
2588 struct rpc_clnt *client = NFS_CLIENT(dir);
2589
2590 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2591 if (client != NFS_CLIENT(dir)) {
2592 rpc_shutdown_client(client);
2593 nfs_fixup_secinfo_attributes(fattr);
2594 }
2595 return status;
2596 }
2597
2598 struct rpc_clnt *
2599 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2600 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2601 {
2602 int status;
2603 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2604
2605 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2606 if (status < 0) {
2607 rpc_shutdown_client(client);
2608 return ERR_PTR(status);
2609 }
2610 return client;
2611 }
2612
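/*
 * Translate the generic MAY_* mask into NFSv4 ACCESS bits (directories
 * use LOOKUP/DELETE where regular files use EXECUTE), issue the ACCESS
 * call and map the server's reply back into entry->mask.
 */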
2613 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2614 {
2615 struct nfs_server *server = NFS_SERVER(inode);
2616 struct nfs4_accessargs args = {
2617 .fh = NFS_FH(inode),
2618 .bitmask = server->cache_consistency_bitmask,
2619 };
2620 struct nfs4_accessres res = {
2621 .server = server,
2622 };
2623 struct rpc_message msg = {
2624 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2625 .rpc_argp = &args,
2626 .rpc_resp = &res,
2627 .rpc_cred = entry->cred,
2628 };
2629 int mode = entry->mask;
2630 int status;
2631
2632 /*
2633 * Determine which access bits we want to ask for...
2634 */
2635 if (mode & MAY_READ)
2636 args.access |= NFS4_ACCESS_READ;
2637 if (S_ISDIR(inode->i_mode)) {
2638 if (mode & MAY_WRITE)
2639 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2640 if (mode & MAY_EXEC)
2641 args.access |= NFS4_ACCESS_LOOKUP;
2642 } else {
2643 if (mode & MAY_WRITE)
2644 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2645 if (mode & MAY_EXEC)
2646 args.access |= NFS4_ACCESS_EXECUTE;
2647 }
2648
2649 res.fattr = nfs_alloc_fattr();
2650 if (res.fattr == NULL)
2651 return -ENOMEM;
2652
2653 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2654 if (!status) {
2655 entry->mask = 0;
2656 if (res.access & NFS4_ACCESS_READ)
2657 entry->mask |= MAY_READ;
2658 if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
2659 entry->mask |= MAY_WRITE;
2660 if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
2661 entry->mask |= MAY_EXEC;
2662 nfs_refresh_inode(inode, res.fattr);
2663 }
2664 nfs_free_fattr(res.fattr);
2665 return status;
2666 }
2667
2668 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2669 {
2670 struct nfs4_exception exception = { };
2671 int err;
2672 do {
2673 err = nfs4_handle_exception(NFS_SERVER(inode),
2674 _nfs4_proc_access(inode, entry),
2675 &exception);
2676 } while (exception.retry);
2677 return err;
2678 }
2679
2680 /*
2681 * TODO: For the time being, we don't try to get any attributes
2682 * along with any of the zero-copy operations READ, READDIR,
2683 * READLINK, WRITE.
2684 *
2685 * In the case of the first three, we want to put the GETATTR
2686 * after the read-type operation -- this is because it is hard
2687 * to predict the length of a GETATTR response in v4, and thus
2688 * align the READ data correctly. This means that the GETATTR
2689 * may end up partially falling into the page cache, and we should
2690 * shift it into the 'tail' of the xdr_buf before processing.
2691 * To do this efficiently, we need to know the total length
2692 * of data received, which doesn't seem to be available outside
2693 * of the RPC layer.
2694 *
2695 * In the case of WRITE, we also want to put the GETATTR after
2696 * the operation -- in this case because we want to make sure
2697 * we get the post-operation mtime and size. This means that
2698 * we can't use xdr_encode_pages() as written: we need a variant
2699 * of it which would leave room in the 'tail' iovec.
2700 *
2701 * Both of these changes to the XDR layer would in fact be quite
2702 * minor, but I decided to leave them for a subsequent patch.
2703 */
2704 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2705 unsigned int pgbase, unsigned int pglen)
2706 {
2707 struct nfs4_readlink args = {
2708 .fh = NFS_FH(inode),
2709 .pgbase = pgbase,
2710 .pglen = pglen,
2711 .pages = &page,
2712 };
2713 struct nfs4_readlink_res res;
2714 struct rpc_message msg = {
2715 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2716 .rpc_argp = &args,
2717 .rpc_resp = &res,
2718 };
2719
2720 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
2721 }
2722
2723 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2724 unsigned int pgbase, unsigned int pglen)
2725 {
2726 struct nfs4_exception exception = { };
2727 int err;
2728 do {
2729 err = nfs4_handle_exception(NFS_SERVER(inode),
2730 _nfs4_proc_readlink(inode, page, pgbase, pglen),
2731 &exception);
2732 } while (exception.retry);
2733 return err;
2734 }
2735
2736 /*
2737 * Got race?
2738 * We will need to arrange for the VFS layer to provide an atomic open.
2739 * Until then, this create/open method is prone to inefficiency and race
2740 * conditions due to the lookup, create, and open VFS calls from sys_open()
2741 * placed on the wire.
2742 *
2743 * Given the above sorry state of affairs, I'm simply sending an OPEN.
2744 * The file will be opened again in the subsequent VFS open call
2745 * (nfs4_proc_file_open).
2746 *
2747 * The open for read will just hang around to be used by any process that
2748 * opens the file O_RDONLY. This will all be resolved with the VFS changes.
2749 */
2750
2751 static int
2752 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2753 int flags, struct nfs_open_context *ctx)
2754 {
2755 struct dentry *de = dentry;
2756 struct nfs4_state *state;
2757 struct rpc_cred *cred = NULL;
2758 fmode_t fmode = 0;
2759 int status = 0;
2760
2761 if (ctx != NULL) {
2762 cred = ctx->cred;
2763 de = ctx->dentry;
2764 fmode = ctx->mode;
2765 }
2766 sattr->ia_mode &= ~current_umask();
2767 state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
2768 d_drop(dentry);
2769 if (IS_ERR(state)) {
2770 status = PTR_ERR(state);
2771 goto out;
2772 }
2773 d_add(dentry, igrab(state->inode));
2774 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2775 if (ctx != NULL)
2776 ctx->state = state;
2777 else
2778 nfs4_close_sync(state, fmode);
2779 out:
2780 return status;
2781 }
2782
2783 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2784 {
2785 struct nfs_server *server = NFS_SERVER(dir);
2786 struct nfs_removeargs args = {
2787 .fh = NFS_FH(dir),
2788 .name.len = name->len,
2789 .name.name = name->name,
2790 .bitmask = server->attr_bitmask,
2791 };
2792 struct nfs_removeres res = {
2793 .server = server,
2794 };
2795 struct rpc_message msg = {
2796 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2797 .rpc_argp = &args,
2798 .rpc_resp = &res,
2799 };
2800 int status = -ENOMEM;
2801
2802 res.dir_attr = nfs_alloc_fattr();
2803 if (res.dir_attr == NULL)
2804 goto out;
2805
2806 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
2807 if (status == 0) {
2808 update_changeattr(dir, &res.cinfo);
2809 nfs_post_op_update_inode(dir, res.dir_attr);
2810 }
2811 nfs_free_fattr(res.dir_attr);
2812 out:
2813 return status;
2814 }
2815
2816 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2817 {
2818 struct nfs4_exception exception = { };
2819 int err;
2820 do {
2821 err = nfs4_handle_exception(NFS_SERVER(dir),
2822 _nfs4_proc_remove(dir, name),
2823 &exception);
2824 } while (exception.retry);
2825 return err;
2826 }
2827
2828 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2829 {
2830 struct nfs_server *server = NFS_SERVER(dir);
2831 struct nfs_removeargs *args = msg->rpc_argp;
2832 struct nfs_removeres *res = msg->rpc_resp;
2833
2834 args->bitmask = server->cache_consistency_bitmask;
2835 res->server = server;
2836 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2837 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
2838 }
2839
2840 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
2841 {
2842 if (nfs4_setup_sequence(NFS_SERVER(data->dir),
2843 &data->args.seq_args,
2844 &data->res.seq_res,
2845 task))
2846 return;
2847 rpc_call_start(task);
2848 }
2849
2850 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2851 {
2852 struct nfs_removeres *res = task->tk_msg.rpc_resp;
2853
2854 if (!nfs4_sequence_done(task, &res->seq_res))
2855 return 0;
2856 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2857 return 0;
2858 update_changeattr(dir, &res->cinfo);
2859 nfs_post_op_update_inode(dir, res->dir_attr);
2860 return 1;
2861 }
2862
2863 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2864 {
2865 struct nfs_server *server = NFS_SERVER(dir);
2866 struct nfs_renameargs *arg = msg->rpc_argp;
2867 struct nfs_renameres *res = msg->rpc_resp;
2868
2869 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2870 arg->bitmask = server->attr_bitmask;
2871 res->server = server;
2872 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
2873 }
2874
2875 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
2876 {
2877 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
2878 &data->args.seq_args,
2879 &data->res.seq_res,
2880 task))
2881 return;
2882 rpc_call_start(task);
2883 }
2884
2885 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
2886 struct inode *new_dir)
2887 {
2888 struct nfs_renameres *res = task->tk_msg.rpc_resp;
2889
2890 if (!nfs4_sequence_done(task, &res->seq_res))
2891 return 0;
2892 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2893 return 0;
2894
2895 update_changeattr(old_dir, &res->old_cinfo);
2896 nfs_post_op_update_inode(old_dir, res->old_fattr);
2897 update_changeattr(new_dir, &res->new_cinfo);
2898 nfs_post_op_update_inode(new_dir, res->new_fattr);
2899 return 1;
2900 }
2901
2902 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2903 struct inode *new_dir, struct qstr *new_name)
2904 {
2905 struct nfs_server *server = NFS_SERVER(old_dir);
2906 struct nfs_renameargs arg = {
2907 .old_dir = NFS_FH(old_dir),
2908 .new_dir = NFS_FH(new_dir),
2909 .old_name = old_name,
2910 .new_name = new_name,
2911 .bitmask = server->attr_bitmask,
2912 };
2913 struct nfs_renameres res = {
2914 .server = server,
2915 };
2916 struct rpc_message msg = {
2917 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
2918 .rpc_argp = &arg,
2919 .rpc_resp = &res,
2920 };
2921 int status = -ENOMEM;
2922
2923 res.old_fattr = nfs_alloc_fattr();
2924 res.new_fattr = nfs_alloc_fattr();
2925 if (res.old_fattr == NULL || res.new_fattr == NULL)
2926 goto out;
2927
2928 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2929 if (!status) {
2930 update_changeattr(old_dir, &res.old_cinfo);
2931 nfs_post_op_update_inode(old_dir, res.old_fattr);
2932 update_changeattr(new_dir, &res.new_cinfo);
2933 nfs_post_op_update_inode(new_dir, res.new_fattr);
2934 }
2935 out:
2936 nfs_free_fattr(res.new_fattr);
2937 nfs_free_fattr(res.old_fattr);
2938 return status;
2939 }
2940
2941 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2942 struct inode *new_dir, struct qstr *new_name)
2943 {
2944 struct nfs4_exception exception = { };
2945 int err;
2946 do {
2947 err = nfs4_handle_exception(NFS_SERVER(old_dir),
2948 _nfs4_proc_rename(old_dir, old_name,
2949 new_dir, new_name),
2950 &exception);
2951 } while (exception.retry);
2952 return err;
2953 }
2954
2955 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2956 {
2957 struct nfs_server *server = NFS_SERVER(inode);
2958 struct nfs4_link_arg arg = {
2959 .fh = NFS_FH(inode),
2960 .dir_fh = NFS_FH(dir),
2961 .name = name,
2962 .bitmask = server->attr_bitmask,
2963 };
2964 struct nfs4_link_res res = {
2965 .server = server,
2966 };
2967 struct rpc_message msg = {
2968 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
2969 .rpc_argp = &arg,
2970 .rpc_resp = &res,
2971 };
2972 int status = -ENOMEM;
2973
2974 res.fattr = nfs_alloc_fattr();
2975 res.dir_attr = nfs_alloc_fattr();
2976 if (res.fattr == NULL || res.dir_attr == NULL)
2977 goto out;
2978
2979 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2980 if (!status) {
2981 update_changeattr(dir, &res.cinfo);
2982 nfs_post_op_update_inode(dir, res.dir_attr);
2983 nfs_post_op_update_inode(inode, res.fattr);
2984 }
2985 out:
2986 nfs_free_fattr(res.dir_attr);
2987 nfs_free_fattr(res.fattr);
2988 return status;
2989 }
2990
2991 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2992 {
2993 struct nfs4_exception exception = { };
2994 int err;
2995 do {
2996 err = nfs4_handle_exception(NFS_SERVER(inode),
2997 _nfs4_proc_link(inode, dir, name),
2998 &exception);
2999 } while (exception.retry);
3000 return err;
3001 }
3002
3003 struct nfs4_createdata {
3004 struct rpc_message msg;
3005 struct nfs4_create_arg arg;
3006 struct nfs4_create_res res;
3007 struct nfs_fh fh;
3008 struct nfs_fattr fattr;
3009 struct nfs_fattr dir_fattr;
3010 };
3011
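/*
 * Allocate and pre-fill the argument/result scaffolding shared by the
 * CREATE-based operations (symlink, mkdir, mknod).
 */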
3012 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3013 struct qstr *name, struct iattr *sattr, u32 ftype)
3014 {
3015 struct nfs4_createdata *data;
3016
3017 data = kzalloc(sizeof(*data), GFP_KERNEL);
3018 if (data != NULL) {
3019 struct nfs_server *server = NFS_SERVER(dir);
3020
3021 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3022 data->msg.rpc_argp = &data->arg;
3023 data->msg.rpc_resp = &data->res;
3024 data->arg.dir_fh = NFS_FH(dir);
3025 data->arg.server = server;
3026 data->arg.name = name;
3027 data->arg.attrs = sattr;
3028 data->arg.ftype = ftype;
3029 data->arg.bitmask = server->attr_bitmask;
3030 data->res.server = server;
3031 data->res.fh = &data->fh;
3032 data->res.fattr = &data->fattr;
3033 data->res.dir_fattr = &data->dir_fattr;
3034 nfs_fattr_init(data->res.fattr);
3035 nfs_fattr_init(data->res.dir_fattr);
3036 }
3037 return data;
3038 }
3039
3040 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3041 {
3042 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3043 &data->arg.seq_args, &data->res.seq_res, 1);
3044 if (status == 0) {
3045 update_changeattr(dir, &data->res.dir_cinfo);
3046 nfs_post_op_update_inode(dir, data->res.dir_fattr);
3047 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3048 }
3049 return status;
3050 }
3051
3052 static void nfs4_free_createdata(struct nfs4_createdata *data)
3053 {
3054 kfree(data);
3055 }
3056
3057 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3058 struct page *page, unsigned int len, struct iattr *sattr)
3059 {
3060 struct nfs4_createdata *data;
3061 int status = -ENAMETOOLONG;
3062
3063 if (len > NFS4_MAXPATHLEN)
3064 goto out;
3065
3066 status = -ENOMEM;
3067 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3068 if (data == NULL)
3069 goto out;
3070
3071 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3072 data->arg.u.symlink.pages = &page;
3073 data->arg.u.symlink.len = len;
3074
3075 status = nfs4_do_create(dir, dentry, data);
3076
3077 nfs4_free_createdata(data);
3078 out:
3079 return status;
3080 }
3081
3082 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3083 struct page *page, unsigned int len, struct iattr *sattr)
3084 {
3085 struct nfs4_exception exception = { };
3086 int err;
3087 do {
3088 err = nfs4_handle_exception(NFS_SERVER(dir),
3089 _nfs4_proc_symlink(dir, dentry, page,
3090 len, sattr),
3091 &exception);
3092 } while (exception.retry);
3093 return err;
3094 }
3095
3096 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3097 struct iattr *sattr)
3098 {
3099 struct nfs4_createdata *data;
3100 int status = -ENOMEM;
3101
3102 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3103 if (data == NULL)
3104 goto out;
3105
3106 status = nfs4_do_create(dir, dentry, data);
3107
3108 nfs4_free_createdata(data);
3109 out:
3110 return status;
3111 }
3112
3113 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3114 struct iattr *sattr)
3115 {
3116 struct nfs4_exception exception = { };
3117 int err;
3118
3119 sattr->ia_mode &= ~current_umask();
3120 do {
3121 err = nfs4_handle_exception(NFS_SERVER(dir),
3122 _nfs4_proc_mkdir(dir, dentry, sattr),
3123 &exception);
3124 } while (exception.retry);
3125 return err;
3126 }
3127
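/*
 * READDIR: fill the caller's pages with entries starting at the given
 * cookie and remember the returned cookie verifier for later calls.
 */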
3128 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3129 u64 cookie, struct page **pages, unsigned int count, int plus)
3130 {
3131 struct inode *dir = dentry->d_inode;
3132 struct nfs4_readdir_arg args = {
3133 .fh = NFS_FH(dir),
3134 .pages = pages,
3135 .pgbase = 0,
3136 .count = count,
3137 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3138 .plus = plus,
3139 };
3140 struct nfs4_readdir_res res;
3141 struct rpc_message msg = {
3142 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3143 .rpc_argp = &args,
3144 .rpc_resp = &res,
3145 .rpc_cred = cred,
3146 };
3147 int status;
3148
3149 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3150 dentry->d_parent->d_name.name,
3151 dentry->d_name.name,
3152 (unsigned long long)cookie);
3153 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3154 res.pgbase = args.pgbase;
3155 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3156 if (status >= 0) {
3157 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3158 status += args.pgbase;
3159 }
3160
3161 nfs_invalidate_atime(dir);
3162
3163 dprintk("%s: returns %d\n", __func__, status);
3164 return status;
3165 }
3166
3167 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3168 u64 cookie, struct page **pages, unsigned int count, int plus)
3169 {
3170 struct nfs4_exception exception = { };
3171 int err;
3172 do {
3173 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3174 _nfs4_proc_readdir(dentry, cred, cookie,
3175 pages, count, plus),
3176 &exception);
3177 } while (exception.retry);
3178 return err;
3179 }
3180
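/*
 * MKNOD is implemented as a CREATE of the corresponding NFSv4 file type;
 * block and character devices also carry the major/minor numbers in the
 * specdata fields.
 */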
3181 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3182 struct iattr *sattr, dev_t rdev)
3183 {
3184 struct nfs4_createdata *data;
3185 int mode = sattr->ia_mode;
3186 int status = -ENOMEM;
3187
3188 BUG_ON(!(sattr->ia_valid & ATTR_MODE));
3189 BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
3190
3191 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3192 if (data == NULL)
3193 goto out;
3194
3195 if (S_ISFIFO(mode))
3196 data->arg.ftype = NF4FIFO;
3197 else if (S_ISBLK(mode)) {
3198 data->arg.ftype = NF4BLK;
3199 data->arg.u.device.specdata1 = MAJOR(rdev);
3200 data->arg.u.device.specdata2 = MINOR(rdev);
3201 }
3202 else if (S_ISCHR(mode)) {
3203 data->arg.ftype = NF4CHR;
3204 data->arg.u.device.specdata1 = MAJOR(rdev);
3205 data->arg.u.device.specdata2 = MINOR(rdev);
3206 }
3207
3208 status = nfs4_do_create(dir, dentry, data);
3209
3210 nfs4_free_createdata(data);
3211 out:
3212 return status;
3213 }
3214
3215 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3216 struct iattr *sattr, dev_t rdev)
3217 {
3218 struct nfs4_exception exception = { };
3219 int err;
3220
3221 sattr->ia_mode &= ~current_umask();
3222 do {
3223 err = nfs4_handle_exception(NFS_SERVER(dir),
3224 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
3225 &exception);
3226 } while (exception.retry);
3227 return err;
3228 }
3229
3230 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3231 struct nfs_fsstat *fsstat)
3232 {
3233 struct nfs4_statfs_arg args = {
3234 .fh = fhandle,
3235 .bitmask = server->attr_bitmask,
3236 };
3237 struct nfs4_statfs_res res = {
3238 .fsstat = fsstat,
3239 };
3240 struct rpc_message msg = {
3241 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3242 .rpc_argp = &args,
3243 .rpc_resp = &res,
3244 };
3245
3246 nfs_fattr_init(fsstat->fattr);
3247 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3248 }
3249
3250 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3251 {
3252 struct nfs4_exception exception = { };
3253 int err;
3254 do {
3255 err = nfs4_handle_exception(server,
3256 _nfs4_proc_statfs(server, fhandle, fsstat),
3257 &exception);
3258 } while (exception.retry);
3259 return err;
3260 }
3261
3262 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3263 struct nfs_fsinfo *fsinfo)
3264 {
3265 struct nfs4_fsinfo_arg args = {
3266 .fh = fhandle,
3267 .bitmask = server->attr_bitmask,
3268 };
3269 struct nfs4_fsinfo_res res = {
3270 .fsinfo = fsinfo,
3271 };
3272 struct rpc_message msg = {
3273 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3274 .rpc_argp = &args,
3275 .rpc_resp = &res,
3276 };
3277
3278 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3279 }
3280
3281 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3282 {
3283 struct nfs4_exception exception = { };
3284 int err;
3285
3286 do {
3287 err = nfs4_handle_exception(server,
3288 _nfs4_do_fsinfo(server, fhandle, fsinfo),
3289 &exception);
3290 } while (exception.retry);
3291 return err;
3292 }
3293
3294 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3295 {
3296 nfs_fattr_init(fsinfo->fattr);
3297 return nfs4_do_fsinfo(server, fhandle, fsinfo);
3298 }
3299
3300 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3301 struct nfs_pathconf *pathconf)
3302 {
3303 struct nfs4_pathconf_arg args = {
3304 .fh = fhandle,
3305 .bitmask = server->attr_bitmask,
3306 };
3307 struct nfs4_pathconf_res res = {
3308 .pathconf = pathconf,
3309 };
3310 struct rpc_message msg = {
3311 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3312 .rpc_argp = &args,
3313 .rpc_resp = &res,
3314 };
3315
3316 /* None of the pathconf attributes are mandatory to implement */
3317 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3318 memset(pathconf, 0, sizeof(*pathconf));
3319 return 0;
3320 }
3321
3322 nfs_fattr_init(pathconf->fattr);
3323 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3324 }
3325
3326 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3327 struct nfs_pathconf *pathconf)
3328 {
3329 struct nfs4_exception exception = { };
3330 int err;
3331
3332 do {
3333 err = nfs4_handle_exception(server,
3334 _nfs4_proc_pathconf(server, fhandle, pathconf),
3335 &exception);
3336 } while (exception.retry);
3337 return err;
3338 }
3339
3340 void __nfs4_read_done_cb(struct nfs_read_data *data)
3341 {
3342 nfs_invalidate_atime(data->inode);
3343 }
3344
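/*
 * READ completion: recoverable state errors restart the RPC via
 * nfs4_async_handle_error(); otherwise the cached atime is
 * invalidated and a successful reply counts as a lease renewal.
 */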
3345 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3346 {
3347 struct nfs_server *server = NFS_SERVER(data->inode);
3348
3349 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3350 rpc_restart_call_prepare(task);
3351 return -EAGAIN;
3352 }
3353
3354 __nfs4_read_done_cb(data);
3355 if (task->tk_status > 0)
3356 renew_lease(server, data->timestamp);
3357 return 0;
3358 }
3359
3360 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3361 {
3362
3363 dprintk("--> %s\n", __func__);
3364
3365 if (!nfs4_sequence_done(task, &data->res.seq_res))
3366 return -EAGAIN;
3367
3368 return data->read_done_cb ? data->read_done_cb(task, data) :
3369 nfs4_read_done_cb(task, data);
3370 }
3371
3372 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3373 {
3374 data->timestamp = jiffies;
3375 data->read_done_cb = nfs4_read_done_cb;
3376 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3377 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3378 }
3379
3380 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3381 {
3382 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3383 &data->args.seq_args,
3384 &data->res.seq_res,
3385 task))
3386 return;
3387 rpc_call_start(task);
3388 }
3389
3390 /* Reset the nfs_read_data to send the read to the MDS. */
3391 void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
3392 {
3393 dprintk("%s Reset task for i/o through\n", __func__);
3394 put_lseg(data->lseg);
3395 data->lseg = NULL;
3396 /* offsets will differ in the dense stripe case */
3397 data->args.offset = data->mds_offset;
3398 data->ds_clp = NULL;
3399 data->args.fh = NFS_FH(data->inode);
3400 data->read_done_cb = nfs4_read_done_cb;
3401 task->tk_ops = data->mds_ops;
3402 rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3403 }
3404 EXPORT_SYMBOL_GPL(nfs4_reset_read);
3405
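/*
 * WRITE completion mirrors the READ path, and additionally folds the
 * post-op attributes into the inode (forcing weak cache consistency
 * updates) on success.
 */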
3406 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3407 {
3408 struct inode *inode = data->inode;
3409
3410 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3411 rpc_restart_call_prepare(task);
3412 return -EAGAIN;
3413 }
3414 if (task->tk_status >= 0) {
3415 renew_lease(NFS_SERVER(inode), data->timestamp);
3416 nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
3417 }
3418 return 0;
3419 }
3420
3421 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3422 {
3423 if (!nfs4_sequence_done(task, &data->res.seq_res))
3424 return -EAGAIN;
3425 return data->write_done_cb ? data->write_done_cb(task, data) :
3426 nfs4_write_done_cb(task, data);
3427 }
3428
3429 /* Reset the nfs_write_data to send the write to the MDS. */
3430 void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
3431 {
3432 dprintk("%s Reset task for i/o through\n", __func__);
3433 put_lseg(data->lseg);
3434 data->lseg = NULL;
3435 data->ds_clp = NULL;
3436 data->write_done_cb = nfs4_write_done_cb;
3437 data->args.fh = NFS_FH(data->inode);
3438 data->args.bitmask = data->res.server->cache_consistency_bitmask;
3439 data->args.offset = data->mds_offset;
3440 data->res.fattr = &data->fattr;
3441 task->tk_ops = data->mds_ops;
3442 rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3443 }
3444 EXPORT_SYMBOL_GPL(nfs4_reset_write);
3445
3446 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3447 {
3448 struct nfs_server *server = NFS_SERVER(data->inode);
3449
3450 if (data->lseg) {
3451 data->args.bitmask = NULL;
3452 data->res.fattr = NULL;
3453 } else
3454 data->args.bitmask = server->cache_consistency_bitmask;
3455 if (!data->write_done_cb)
3456 data->write_done_cb = nfs4_write_done_cb;
3457 data->res.server = server;
3458 data->timestamp = jiffies;
3459
3460 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3461 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3462 }
3463
3464 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3465 {
3466 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3467 &data->args.seq_args,
3468 &data->res.seq_res,
3469 task))
3470 return;
3471 rpc_call_start(task);
3472 }
3473
3474 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3475 {
3476 struct inode *inode = data->inode;
3477
3478 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3479 rpc_restart_call_prepare(task);
3480 return -EAGAIN;
3481 }
3482 nfs_refresh_inode(inode, data->res.fattr);
3483 return 0;
3484 }
3485
3486 static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
3487 {
3488 if (!nfs4_sequence_done(task, &data->res.seq_res))
3489 return -EAGAIN;
3490 return data->write_done_cb(task, data);
3491 }
3492
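/*
 * COMMIT setup: the cache-consistency bitmask and post-op attributes
 * are only requested when the commit goes to the MDS; commits through
 * a pNFS layout segment (data->lseg != NULL) skip them.
 */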
3493 static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
3494 {
3495 struct nfs_server *server = NFS_SERVER(data->inode);
3496
3497 if (data->lseg) {
3498 data->args.bitmask = NULL;
3499 data->res.fattr = NULL;
3500 } else
3501 data->args.bitmask = server->cache_consistency_bitmask;
3502 if (!data->write_done_cb)
3503 data->write_done_cb = nfs4_commit_done_cb;
3504 data->res.server = server;
3505 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3506 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3507 }
3508
3509 struct nfs4_renewdata {
3510 struct nfs_client *client;
3511 unsigned long timestamp;
3512 };
3513
3514 /*
3515 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3516 * standalone procedure for queueing an asynchronous RENEW.
3517 */
3518 static void nfs4_renew_release(void *calldata)
3519 {
3520 struct nfs4_renewdata *data = calldata;
3521 struct nfs_client *clp = data->client;
3522
3523 if (atomic_read(&clp->cl_count) > 1)
3524 nfs4_schedule_state_renewal(clp);
3525 nfs_put_client(clp);
3526 kfree(data);
3527 }
3528
3529 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3530 {
3531 struct nfs4_renewdata *data = calldata;
3532 struct nfs_client *clp = data->client;
3533 unsigned long timestamp = data->timestamp;
3534
3535 if (task->tk_status < 0) {
3536 /* Unless we're shutting down, schedule state recovery! */
3537 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3538 return;
3539 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3540 nfs4_schedule_lease_recovery(clp);
3541 return;
3542 }
3543 nfs4_schedule_path_down_recovery(clp);
3544 }
3545 do_renew_lease(clp, timestamp);
3546 }
3547
3548 static const struct rpc_call_ops nfs4_renew_ops = {
3549 .rpc_call_done = nfs4_renew_done,
3550 .rpc_release = nfs4_renew_release,
3551 };
3552
3553 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3554 {
3555 struct rpc_message msg = {
3556 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3557 .rpc_argp = clp,
3558 .rpc_cred = cred,
3559 };
3560 struct nfs4_renewdata *data;
3561
3562 if (renew_flags == 0)
3563 return 0;
3564 if (!atomic_inc_not_zero(&clp->cl_count))
3565 return -EIO;
3566 data = kmalloc(sizeof(*data), GFP_NOFS);
3567 if (data == NULL)
3568 return -ENOMEM;
3569 data->client = clp;
3570 data->timestamp = jiffies;
3571 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3572 &nfs4_renew_ops, data);
3573 }
3574
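/* Synchronous RENEW: on success, record the renewal time for the lease. */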
3575 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3576 {
3577 struct rpc_message msg = {
3578 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3579 .rpc_argp = clp,
3580 .rpc_cred = cred,
3581 };
3582 unsigned long now = jiffies;
3583 int status;
3584
3585 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3586 if (status < 0)
3587 return status;
3588 do_renew_lease(clp, now);
3589 return 0;
3590 }
3591
3592 static inline int nfs4_server_supports_acls(struct nfs_server *server)
3593 {
3594 return (server->caps & NFS_CAP_ACLS)
3595 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3596 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3597 }
3598
3599 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3600 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3601 * the stack.
3602 */
3603 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3604
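/*
 * Copy a user-supplied ACL buffer into freshly allocated pages for
 * transmission. Returns the number of pages used, or -ENOMEM after
 * freeing any pages that were already allocated.
 */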
3605 static int buf_to_pages_noslab(const void *buf, size_t buflen,
3606 struct page **pages, unsigned int *pgbase)
3607 {
3608 struct page *newpage, **spages;
3609 int rc = 0;
3610 size_t len;
3611 spages = pages;
3612
3613 do {
3614 len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3615 newpage = alloc_page(GFP_KERNEL);
3616
3617 if (newpage == NULL)
3618 goto unwind;
3619 memcpy(page_address(newpage), buf, len);
3620 buf += len;
3621 buflen -= len;
3622 *pages++ = newpage;
3623 rc++;
3624 } while (buflen != 0);
3625
3626 return rc;
3627
3628 unwind:
3629 for(; rc > 0; rc--)
3630 __free_page(spages[rc-1]);
3631 return -ENOMEM;
3632 }
3633
3634 struct nfs4_cached_acl {
3635 int cached;
3636 size_t len;
3637 char data[0];
3638 };
3639
3640 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3641 {
3642 struct nfs_inode *nfsi = NFS_I(inode);
3643
3644 spin_lock(&inode->i_lock);
3645 kfree(nfsi->nfs4_acl);
3646 nfsi->nfs4_acl = acl;
3647 spin_unlock(&inode->i_lock);
3648 }
3649
3650 static void nfs4_zap_acl_attr(struct inode *inode)
3651 {
3652 nfs4_set_cached_acl(inode, NULL);
3653 }
3654
3655 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3656 {
3657 struct nfs_inode *nfsi = NFS_I(inode);
3658 struct nfs4_cached_acl *acl;
3659 int ret = -ENOENT;
3660
3661 spin_lock(&inode->i_lock);
3662 acl = nfsi->nfs4_acl;
3663 if (acl == NULL)
3664 goto out;
3665 if (buf == NULL) /* user is just asking for length */
3666 goto out_len;
3667 if (acl->cached == 0)
3668 goto out;
3669 ret = -ERANGE; /* see getxattr(2) man page */
3670 if (acl->len > buflen)
3671 goto out;
3672 memcpy(buf, acl->data, acl->len);
3673 out_len:
3674 ret = acl->len;
3675 out:
3676 spin_unlock(&inode->i_lock);
3677 return ret;
3678 }
3679
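/*
 * Cache the ACL returned by the server: if it fits in one page the
 * data itself is kept, otherwise only its length is remembered so a
 * later getxattr() length probe can be answered from the cache.
 */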
3680 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3681 {
3682 struct nfs4_cached_acl *acl;
3683
3684 if (pages && acl_len <= PAGE_SIZE) {
3685 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3686 if (acl == NULL)
3687 goto out;
3688 acl->cached = 1;
3689 _copy_from_pages(acl->data, pages, pgbase, acl_len);
3690 } else {
3691 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3692 if (acl == NULL)
3693 goto out;
3694 acl->cached = 0;
3695 }
3696 acl->len = acl_len;
3697 out:
3698 nfs4_set_cached_acl(inode, acl);
3699 }
3700
3701 /*
3702 * The getxattr API returns the required buffer length when called with a
3703 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3704 * the required buf. On a NULL buf, we send a page of data to the server
3705 * guessing that the ACL request can be serviced by a page. If so, we cache
3706 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3707 * the cache. If not so, we throw away the page, and cache the required
3708 * length. The next getxattr call will then produce another round trip to
3709 * the server, this time with the input buf of the required size.
3710 */
3711 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3712 {
3713 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3714 struct nfs_getaclargs args = {
3715 .fh = NFS_FH(inode),
3716 .acl_pages = pages,
3717 .acl_len = buflen,
3718 };
3719 struct nfs_getaclres res = {
3720 .acl_len = buflen,
3721 };
3722 struct rpc_message msg = {
3723 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3724 .rpc_argp = &args,
3725 .rpc_resp = &res,
3726 };
3727 int ret = -ENOMEM, npages, i;
3728 size_t acl_len = 0;
3729
3730 npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3731 /* As long as we're doing a round trip to the server anyway,
3732 * let's be prepared for a page of acl data. */
3733 if (npages == 0)
3734 npages = 1;
3735
3736 /* Add an extra page to handle the bitmap returned */
3737 npages++;
3738
3739 for (i = 0; i < npages; i++) {
3740 pages[i] = alloc_page(GFP_KERNEL);
3741 if (!pages[i])
3742 goto out_free;
3743 }
3744
3745 /* for decoding across pages */
3746 res.acl_scratch = alloc_page(GFP_KERNEL);
3747 if (!res.acl_scratch)
3748 goto out_free;
3749
3750 args.acl_len = npages * PAGE_SIZE;
3751 args.acl_pgbase = 0;
3752
3753 /* Let decode_getfacl know not to fail if the ACL data is larger than
3754 * the page we send as a guess */
3755 if (buf == NULL)
3756 res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3757
3758 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3759 __func__, buf, buflen, npages, args.acl_len);
3760 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3761 &msg, &args.seq_args, &res.seq_res, 0);
3762 if (ret)
3763 goto out_free;
3764
3765 acl_len = res.acl_len - res.acl_data_offset;
3766 if (acl_len > args.acl_len)
3767 nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3768 else
3769 nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
3770 acl_len);
3771 if (buf) {
3772 ret = -ERANGE;
3773 if (acl_len > buflen)
3774 goto out_free;
3775 _copy_from_pages(buf, pages, res.acl_data_offset,
3776 acl_len);
3777 }
3778 ret = acl_len;
3779 out_free:
3780 for (i = 0; i < npages; i++)
3781 if (pages[i])
3782 __free_page(pages[i]);
3783 if (res.acl_scratch)
3784 __free_page(res.acl_scratch);
3785 return ret;
3786 }
3787
3788 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3789 {
3790 struct nfs4_exception exception = { };
3791 ssize_t ret;
3792 do {
3793 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3794 if (ret >= 0)
3795 break;
3796 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3797 } while (exception.retry);
3798 return ret;
3799 }
3800
3801 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3802 {
3803 struct nfs_server *server = NFS_SERVER(inode);
3804 int ret;
3805
3806 if (!nfs4_server_supports_acls(server))
3807 return -EOPNOTSUPP;
3808 ret = nfs_revalidate_inode(server, inode);
3809 if (ret < 0)
3810 return ret;
3811 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3812 nfs_zap_acl_cache(inode);
3813 ret = nfs4_read_cached_acl(inode, buf, buflen);
3814 if (ret != -ENOENT)
3815 /* -ENOENT is returned if there is no ACL or if there is an ACL
3816 * but no cached acl data, just the acl length */
3817 return ret;
3818 return nfs4_get_acl_uncached(inode, buf, buflen);
3819 }
3820
3821 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3822 {
3823 struct nfs_server *server = NFS_SERVER(inode);
3824 struct page *pages[NFS4ACL_MAXPAGES];
3825 struct nfs_setaclargs arg = {
3826 .fh = NFS_FH(inode),
3827 .acl_pages = pages,
3828 .acl_len = buflen,
3829 };
3830 struct nfs_setaclres res;
3831 struct rpc_message msg = {
3832 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3833 .rpc_argp = &arg,
3834 .rpc_resp = &res,
3835 };
3836 int ret, i;
3837
3838 if (!nfs4_server_supports_acls(server))
3839 return -EOPNOTSUPP;
3840 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3841 if (i < 0)
3842 return i;
3843 nfs_inode_return_delegation(inode);
3844 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3845
3846 /*
3847 * Free each page after tx, so the only ref left is
3848 * held by the network stack
3849 */
3850 for (; i > 0; i--)
3851 put_page(pages[i-1]);
3852
3853 /*
3854 * An ACL update can result in an inode attribute update,
3855 * so mark the attribute cache invalid.
3856 */
3857 spin_lock(&inode->i_lock);
3858 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
3859 spin_unlock(&inode->i_lock);
3860 nfs_access_zap_cache(inode);
3861 nfs_zap_acl_cache(inode);
3862 return ret;
3863 }
3864
3865 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3866 {
3867 struct nfs4_exception exception = { };
3868 int err;
3869 do {
3870 err = nfs4_handle_exception(NFS_SERVER(inode),
3871 __nfs4_proc_set_acl(inode, buf, buflen),
3872 &exception);
3873 } while (exception.retry);
3874 return err;
3875 }
3876
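/*
 * Generic asynchronous error handling: schedule stateid, lease or
 * session recovery as appropriate, delay and retry on
 * NFS4ERR_DELAY/NFS4ERR_GRACE, and map anything else to a POSIX errno.
 * Returning -EAGAIN tells the caller to restart the RPC once any
 * recovery has completed.
 */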
3877 static int
3878 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3879 {
3880 struct nfs_client *clp = server->nfs_client;
3881
3882 if (task->tk_status >= 0)
3883 return 0;
3884 switch(task->tk_status) {
3885 case -NFS4ERR_DELEG_REVOKED:
3886 case -NFS4ERR_ADMIN_REVOKED:
3887 case -NFS4ERR_BAD_STATEID:
3888 if (state == NULL)
3889 break;
3890 nfs_remove_bad_delegation(state->inode);
3891 case -NFS4ERR_OPENMODE:
3892 if (state == NULL)
3893 break;
3894 nfs4_schedule_stateid_recovery(server, state);
3895 goto wait_on_recovery;
3896 case -NFS4ERR_EXPIRED:
3897 if (state != NULL)
3898 nfs4_schedule_stateid_recovery(server, state);
3899 case -NFS4ERR_STALE_STATEID:
3900 case -NFS4ERR_STALE_CLIENTID:
3901 nfs4_schedule_lease_recovery(clp);
3902 goto wait_on_recovery;
3903 #if defined(CONFIG_NFS_V4_1)
3904 case -NFS4ERR_BADSESSION:
3905 case -NFS4ERR_BADSLOT:
3906 case -NFS4ERR_BAD_HIGH_SLOT:
3907 case -NFS4ERR_DEADSESSION:
3908 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3909 case -NFS4ERR_SEQ_FALSE_RETRY:
3910 case -NFS4ERR_SEQ_MISORDERED:
3911 dprintk("%s ERROR %d, Reset session\n", __func__,
3912 task->tk_status);
3913 nfs4_schedule_session_recovery(clp->cl_session);
3914 goto wait_on_recovery;
3915 #endif /* CONFIG_NFS_V4_1 */
3916 case -NFS4ERR_DELAY:
3917 nfs_inc_server_stats(server, NFSIOS_DELAY);
3918 case -NFS4ERR_GRACE:
3919 rpc_delay(task, NFS4_POLL_RETRY_MAX);
3920 task->tk_status = 0;
3921 return -EAGAIN;
3922 case -NFS4ERR_RETRY_UNCACHED_REP:
3923 case -NFS4ERR_OLD_STATEID:
3924 task->tk_status = 0;
3925 return -EAGAIN;
3926 }
3927 task->tk_status = nfs4_map_errors(task->tk_status);
3928 return 0;
3929 wait_on_recovery:
3930 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3931 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3932 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3933 task->tk_status = 0;
3934 return -EAGAIN;
3935 }
3936
3937 static void nfs4_construct_boot_verifier(struct nfs_client *clp,
3938 nfs4_verifier *bootverf)
3939 {
3940 __be32 verf[2];
3941
3942 verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
3943 verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
3944 memcpy(bootverf->data, verf, sizeof(bootverf->data));
3945 }
3946
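/*
 * SETCLIENTID: establish a client ID. The boot-time verifier lets the
 * server detect client reboots; on NFS4ERR_CLID_INUSE we sleep out a
 * lease period and retry once before bumping the uniquifier and
 * giving up.
 */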
3947 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3948 unsigned short port, struct rpc_cred *cred,
3949 struct nfs4_setclientid_res *res)
3950 {
3951 nfs4_verifier sc_verifier;
3952 struct nfs4_setclientid setclientid = {
3953 .sc_verifier = &sc_verifier,
3954 .sc_prog = program,
3955 .sc_cb_ident = clp->cl_cb_ident,
3956 };
3957 struct rpc_message msg = {
3958 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
3959 .rpc_argp = &setclientid,
3960 .rpc_resp = res,
3961 .rpc_cred = cred,
3962 };
3963 int loop = 0;
3964 int status;
3965
3966 nfs4_construct_boot_verifier(clp, &sc_verifier);
3967
3968 for(;;) {
3969 rcu_read_lock();
3970 setclientid.sc_name_len = scnprintf(setclientid.sc_name,
3971 sizeof(setclientid.sc_name), "%s/%s %s %s %u",
3972 clp->cl_ipaddr,
3973 rpc_peeraddr2str(clp->cl_rpcclient,
3974 RPC_DISPLAY_ADDR),
3975 rpc_peeraddr2str(clp->cl_rpcclient,
3976 RPC_DISPLAY_PROTO),
3977 clp->cl_rpcclient->cl_auth->au_ops->au_name,
3978 clp->cl_id_uniquifier);
3979 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
3980 sizeof(setclientid.sc_netid),
3981 rpc_peeraddr2str(clp->cl_rpcclient,
3982 RPC_DISPLAY_NETID));
3983 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
3984 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
3985 clp->cl_ipaddr, port >> 8, port & 255);
3986 rcu_read_unlock();
3987
3988 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
3989 if (status != -NFS4ERR_CLID_INUSE)
3990 break;
3991 if (loop != 0) {
3992 ++clp->cl_id_uniquifier;
3993 break;
3994 }
3995 ++loop;
3996 ssleep(clp->cl_lease_time / HZ + 1);
3997 }
3998 return status;
3999 }
4000
4001 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
4002 struct nfs4_setclientid_res *arg,
4003 struct rpc_cred *cred)
4004 {
4005 struct nfs_fsinfo fsinfo;
4006 struct rpc_message msg = {
4007 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
4008 .rpc_argp = arg,
4009 .rpc_resp = &fsinfo,
4010 .rpc_cred = cred,
4011 };
4012 unsigned long now;
4013 int status;
4014
4015 now = jiffies;
4016 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4017 if (status == 0) {
4018 spin_lock(&clp->cl_lock);
4019 clp->cl_lease_time = fsinfo.lease_time * HZ;
4020 clp->cl_last_renewal = now;
4021 spin_unlock(&clp->cl_lock);
4022 }
4023 return status;
4024 }
4025
4026 struct nfs4_delegreturndata {
4027 struct nfs4_delegreturnargs args;
4028 struct nfs4_delegreturnres res;
4029 struct nfs_fh fh;
4030 nfs4_stateid stateid;
4031 unsigned long timestamp;
4032 struct nfs_fattr fattr;
4033 int rpc_status;
4034 };
4035
4036 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4037 {
4038 struct nfs4_delegreturndata *data = calldata;
4039
4040 if (!nfs4_sequence_done(task, &data->res.seq_res))
4041 return;
4042
4043 switch (task->tk_status) {
4044 case 0:
4045 renew_lease(data->res.server, data->timestamp);
4046 break;
4047 case -NFS4ERR_ADMIN_REVOKED:
4048 case -NFS4ERR_DELEG_REVOKED:
4049 case -NFS4ERR_BAD_STATEID:
4050 case -NFS4ERR_OLD_STATEID:
4051 case -NFS4ERR_STALE_STATEID:
4052 case -NFS4ERR_EXPIRED:
4053 task->tk_status = 0;
4054 break;
4055 default:
4056 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4057 -EAGAIN) {
4058 rpc_restart_call_prepare(task);
4059 return;
4060 }
4061 }
4062 data->rpc_status = task->tk_status;
4063 }
4064
4065 static void nfs4_delegreturn_release(void *calldata)
4066 {
4067 kfree(calldata);
4068 }
4069
4070 #if defined(CONFIG_NFS_V4_1)
4071 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
4072 {
4073 struct nfs4_delegreturndata *d_data;
4074
4075 d_data = (struct nfs4_delegreturndata *)data;
4076
4077 if (nfs4_setup_sequence(d_data->res.server,
4078 &d_data->args.seq_args,
4079 &d_data->res.seq_res, task))
4080 return;
4081 rpc_call_start(task);
4082 }
4083 #endif /* CONFIG_NFS_V4_1 */
4084
4085 static const struct rpc_call_ops nfs4_delegreturn_ops = {
4086 #if defined(CONFIG_NFS_V4_1)
4087 .rpc_call_prepare = nfs4_delegreturn_prepare,
4088 #endif /* CONFIG_NFS_V4_1 */
4089 .rpc_call_done = nfs4_delegreturn_done,
4090 .rpc_release = nfs4_delegreturn_release,
4091 };
4092
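/*
 * DELEGRETURN runs as an asynchronous RPC task; when issync is set we
 * wait for completion and fold the returned attributes into the inode.
 */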
4093 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4094 {
4095 struct nfs4_delegreturndata *data;
4096 struct nfs_server *server = NFS_SERVER(inode);
4097 struct rpc_task *task;
4098 struct rpc_message msg = {
4099 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
4100 .rpc_cred = cred,
4101 };
4102 struct rpc_task_setup task_setup_data = {
4103 .rpc_client = server->client,
4104 .rpc_message = &msg,
4105 .callback_ops = &nfs4_delegreturn_ops,
4106 .flags = RPC_TASK_ASYNC,
4107 };
4108 int status = 0;
4109
4110 data = kzalloc(sizeof(*data), GFP_NOFS);
4111 if (data == NULL)
4112 return -ENOMEM;
4113 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4114 data->args.fhandle = &data->fh;
4115 data->args.stateid = &data->stateid;
4116 data->args.bitmask = server->attr_bitmask;
4117 nfs_copy_fh(&data->fh, NFS_FH(inode));
4118 nfs4_stateid_copy(&data->stateid, stateid);
4119 data->res.fattr = &data->fattr;
4120 data->res.server = server;
4121 nfs_fattr_init(data->res.fattr);
4122 data->timestamp = jiffies;
4123 data->rpc_status = 0;
4124
4125 task_setup_data.callback_data = data;
4126 msg.rpc_argp = &data->args;
4127 msg.rpc_resp = &data->res;
4128 task = rpc_run_task(&task_setup_data);
4129 if (IS_ERR(task))
4130 return PTR_ERR(task);
4131 if (!issync)
4132 goto out;
4133 status = nfs4_wait_for_completion_rpc_task(task);
4134 if (status != 0)
4135 goto out;
4136 status = data->rpc_status;
4137 if (status != 0)
4138 goto out;
4139 nfs_refresh_inode(inode, &data->fattr);
4140 out:
4141 rpc_put_task(task);
4142 return status;
4143 }
4144
4145 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4146 {
4147 struct nfs_server *server = NFS_SERVER(inode);
4148 struct nfs4_exception exception = { };
4149 int err;
4150 do {
4151 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4152 switch (err) {
4153 case -NFS4ERR_STALE_STATEID:
4154 case -NFS4ERR_EXPIRED:
4155 case 0:
4156 return 0;
4157 }
4158 err = nfs4_handle_exception(server, err, &exception);
4159 } while (exception.retry);
4160 return err;
4161 }
4162
4163 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4164 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4165
4166 /*
4167 * sleep, with exponential backoff, and retry the LOCK operation.
4168 */
4169 static unsigned long
4170 nfs4_set_lock_task_retry(unsigned long timeout)
4171 {
4172 freezable_schedule_timeout_killable(timeout);
4173 timeout <<= 1;
4174 if (timeout > NFS4_LOCK_MAXTIMEOUT)
4175 return NFS4_LOCK_MAXTIMEOUT;
4176 return timeout;
4177 }
4178
4179 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4180 {
4181 struct inode *inode = state->inode;
4182 struct nfs_server *server = NFS_SERVER(inode);
4183 struct nfs_client *clp = server->nfs_client;
4184 struct nfs_lockt_args arg = {
4185 .fh = NFS_FH(inode),
4186 .fl = request,
4187 };
4188 struct nfs_lockt_res res = {
4189 .denied = request,
4190 };
4191 struct rpc_message msg = {
4192 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4193 .rpc_argp = &arg,
4194 .rpc_resp = &res,
4195 .rpc_cred = state->owner->so_cred,
4196 };
4197 struct nfs4_lock_state *lsp;
4198 int status;
4199
4200 arg.lock_owner.clientid = clp->cl_clientid;
4201 status = nfs4_set_lock_state(state, request);
4202 if (status != 0)
4203 goto out;
4204 lsp = request->fl_u.nfs4_fl.owner;
4205 arg.lock_owner.id = lsp->ls_seqid.owner_id;
4206 arg.lock_owner.s_dev = server->s_dev;
4207 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4208 switch (status) {
4209 case 0:
4210 request->fl_type = F_UNLCK;
4211 break;
4212 case -NFS4ERR_DENIED:
4213 status = 0;
4214 }
4215 request->fl_ops->fl_release_private(request);
4216 request->fl_ops = NULL;
4217 out:
4218 return status;
4219 }
4220
4221 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4222 {
4223 struct nfs4_exception exception = { };
4224 int err;
4225
4226 do {
4227 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4228 _nfs4_proc_getlk(state, cmd, request),
4229 &exception);
4230 } while (exception.retry);
4231 return err;
4232 }
4233
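/* Record the lock locally with the VFS, using POSIX or flock semantics. */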
4234 static int do_vfs_lock(struct file *file, struct file_lock *fl)
4235 {
4236 int res = 0;
4237 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4238 case FL_POSIX:
4239 res = posix_lock_file_wait(file, fl);
4240 break;
4241 case FL_FLOCK:
4242 res = flock_lock_file_wait(file, fl);
4243 break;
4244 default:
4245 BUG();
4246 }
4247 return res;
4248 }
4249
4250 struct nfs4_unlockdata {
4251 struct nfs_locku_args arg;
4252 struct nfs_locku_res res;
4253 struct nfs4_lock_state *lsp;
4254 struct nfs_open_context *ctx;
4255 struct file_lock fl;
4256 const struct nfs_server *server;
4257 unsigned long timestamp;
4258 };
4259
4260 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4261 struct nfs_open_context *ctx,
4262 struct nfs4_lock_state *lsp,
4263 struct nfs_seqid *seqid)
4264 {
4265 struct nfs4_unlockdata *p;
4266 struct inode *inode = lsp->ls_state->inode;
4267
4268 p = kzalloc(sizeof(*p), GFP_NOFS);
4269 if (p == NULL)
4270 return NULL;
4271 p->arg.fh = NFS_FH(inode);
4272 p->arg.fl = &p->fl;
4273 p->arg.seqid = seqid;
4274 p->res.seqid = seqid;
4275 p->arg.stateid = &lsp->ls_stateid;
4276 p->lsp = lsp;
4277 atomic_inc(&lsp->ls_count);
4278 /* Ensure we don't close file until we're done freeing locks! */
4279 p->ctx = get_nfs_open_context(ctx);
4280 memcpy(&p->fl, fl, sizeof(p->fl));
4281 p->server = NFS_SERVER(inode);
4282 return p;
4283 }
4284
4285 static void nfs4_locku_release_calldata(void *data)
4286 {
4287 struct nfs4_unlockdata *calldata = data;
4288 nfs_free_seqid(calldata->arg.seqid);
4289 nfs4_put_lock_state(calldata->lsp);
4290 put_nfs_open_context(calldata->ctx);
4291 kfree(calldata);
4292 }
4293
4294 static void nfs4_locku_done(struct rpc_task *task, void *data)
4295 {
4296 struct nfs4_unlockdata *calldata = data;
4297
4298 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4299 return;
4300 switch (task->tk_status) {
4301 case 0:
4302 nfs4_stateid_copy(&calldata->lsp->ls_stateid,
4303 &calldata->res.stateid);
4304 renew_lease(calldata->server, calldata->timestamp);
4305 break;
4306 case -NFS4ERR_BAD_STATEID:
4307 case -NFS4ERR_OLD_STATEID:
4308 case -NFS4ERR_STALE_STATEID:
4309 case -NFS4ERR_EXPIRED:
4310 break;
4311 default:
4312 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4313 rpc_restart_call_prepare(task);
4314 }
4315 nfs_release_seqid(calldata->arg.seqid);
4316 }
4317
4318 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4319 {
4320 struct nfs4_unlockdata *calldata = data;
4321
4322 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4323 return;
4324 if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
4325 /* Note: exit _without_ running nfs4_locku_done */
4326 task->tk_action = NULL;
4327 return;
4328 }
4329 calldata->timestamp = jiffies;
4330 if (nfs4_setup_sequence(calldata->server,
4331 &calldata->arg.seq_args,
4332 &calldata->res.seq_res,
4333 task) != 0)
4334 nfs_release_seqid(calldata->arg.seqid);
4335 else
4336 rpc_call_start(task);
4337 }
4338
4339 static const struct rpc_call_ops nfs4_locku_ops = {
4340 .rpc_call_prepare = nfs4_locku_prepare,
4341 .rpc_call_done = nfs4_locku_done,
4342 .rpc_release = nfs4_locku_release_calldata,
4343 };
4344
4345 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4346 struct nfs_open_context *ctx,
4347 struct nfs4_lock_state *lsp,
4348 struct nfs_seqid *seqid)
4349 {
4350 struct nfs4_unlockdata *data;
4351 struct rpc_message msg = {
4352 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4353 .rpc_cred = ctx->cred,
4354 };
4355 struct rpc_task_setup task_setup_data = {
4356 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4357 .rpc_message = &msg,
4358 .callback_ops = &nfs4_locku_ops,
4359 .workqueue = nfsiod_workqueue,
4360 .flags = RPC_TASK_ASYNC,
4361 };
4362
4363 /* Ensure this is an unlock - when canceling a lock, the
4364 * canceled lock is passed in, and it won't be an unlock.
4365 */
4366 fl->fl_type = F_UNLCK;
4367
4368 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4369 if (data == NULL) {
4370 nfs_free_seqid(seqid);
4371 return ERR_PTR(-ENOMEM);
4372 }
4373
4374 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4375 msg.rpc_argp = &data->arg;
4376 msg.rpc_resp = &data->res;
4377 task_setup_data.callback_data = data;
4378 return rpc_run_task(&task_setup_data);
4379 }
4380
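/*
 * Unlock: the lock is released locally before the RPC is issued, and
 * LOCKU is skipped entirely if the VFS did not know about the lock or
 * the open is still delegated.
 */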
4381 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4382 {
4383 struct nfs_inode *nfsi = NFS_I(state->inode);
4384 struct nfs_seqid *seqid;
4385 struct nfs4_lock_state *lsp;
4386 struct rpc_task *task;
4387 int status = 0;
4388 unsigned char fl_flags = request->fl_flags;
4389
4390 status = nfs4_set_lock_state(state, request);
4391 /* Unlock _before_ we do the RPC call */
4392 request->fl_flags |= FL_EXISTS;
4393 down_read(&nfsi->rwsem);
4394 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4395 up_read(&nfsi->rwsem);
4396 goto out;
4397 }
4398 up_read(&nfsi->rwsem);
4399 if (status != 0)
4400 goto out;
4401 /* Is this a delegated lock? */
4402 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4403 goto out;
4404 lsp = request->fl_u.nfs4_fl.owner;
4405 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4406 status = -ENOMEM;
4407 if (seqid == NULL)
4408 goto out;
4409 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4410 status = PTR_ERR(task);
4411 if (IS_ERR(task))
4412 goto out;
4413 status = nfs4_wait_for_completion_rpc_task(task);
4414 rpc_put_task(task);
4415 out:
4416 request->fl_flags = fl_flags;
4417 return status;
4418 }
4419
4420 struct nfs4_lockdata {
4421 struct nfs_lock_args arg;
4422 struct nfs_lock_res res;
4423 struct nfs4_lock_state *lsp;
4424 struct nfs_open_context *ctx;
4425 struct file_lock fl;
4426 unsigned long timestamp;
4427 int rpc_status;
4428 int cancelled;
4429 struct nfs_server *server;
4430 };
4431
4432 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4433 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4434 gfp_t gfp_mask)
4435 {
4436 struct nfs4_lockdata *p;
4437 struct inode *inode = lsp->ls_state->inode;
4438 struct nfs_server *server = NFS_SERVER(inode);
4439
4440 p = kzalloc(sizeof(*p), gfp_mask);
4441 if (p == NULL)
4442 return NULL;
4443
4444 p->arg.fh = NFS_FH(inode);
4445 p->arg.fl = &p->fl;
4446 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4447 if (p->arg.open_seqid == NULL)
4448 goto out_free;
4449 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4450 if (p->arg.lock_seqid == NULL)
4451 goto out_free_seqid;
4452 p->arg.lock_stateid = &lsp->ls_stateid;
4453 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4454 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4455 p->arg.lock_owner.s_dev = server->s_dev;
4456 p->res.lock_seqid = p->arg.lock_seqid;
4457 p->lsp = lsp;
4458 p->server = server;
4459 atomic_inc(&lsp->ls_count);
4460 p->ctx = get_nfs_open_context(ctx);
4461 memcpy(&p->fl, fl, sizeof(p->fl));
4462 return p;
4463 out_free_seqid:
4464 nfs_free_seqid(p->arg.open_seqid);
4465 out_free:
4466 kfree(p);
4467 return NULL;
4468 }
4469
4470 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4471 {
4472 struct nfs4_lockdata *data = calldata;
4473 struct nfs4_state *state = data->lsp->ls_state;
4474
4475 dprintk("%s: begin!\n", __func__);
4476 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4477 return;
4478 /* Do we need to do an open_to_lock_owner? */
4479 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4480 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4481 goto out_release_lock_seqid;
4482 data->arg.open_stateid = &state->stateid;
4483 data->arg.new_lock_owner = 1;
4484 data->res.open_seqid = data->arg.open_seqid;
4485 } else
4486 data->arg.new_lock_owner = 0;
4487 data->timestamp = jiffies;
4488 if (nfs4_setup_sequence(data->server,
4489 &data->arg.seq_args,
4490 &data->res.seq_res,
4491 task) == 0) {
4492 rpc_call_start(task);
4493 return;
4494 }
4495 nfs_release_seqid(data->arg.open_seqid);
4496 out_release_lock_seqid:
4497 nfs_release_seqid(data->arg.lock_seqid);
4498 dprintk("%s: done!, ret = %d\n", __func__, task->tk_status);
4499 }
4500
4501 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4502 {
4503 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4504 nfs4_lock_prepare(task, calldata);
4505 }
4506
4507 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4508 {
4509 struct nfs4_lockdata *data = calldata;
4510
4511 dprintk("%s: begin!\n", __func__);
4512
4513 if (!nfs4_sequence_done(task, &data->res.seq_res))
4514 return;
4515
4516 data->rpc_status = task->tk_status;
4517 if (data->arg.new_lock_owner != 0) {
4518 if (data->rpc_status == 0)
4519 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4520 else
4521 goto out;
4522 }
4523 if (data->rpc_status == 0) {
4524 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
4525 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
4526 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4527 }
4528 out:
4529 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4530 }
4531
4532 static void nfs4_lock_release(void *calldata)
4533 {
4534 struct nfs4_lockdata *data = calldata;
4535
4536 dprintk("%s: begin!\n", __func__);
4537 nfs_free_seqid(data->arg.open_seqid);
4538 if (data->cancelled != 0) {
4539 struct rpc_task *task;
4540 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4541 data->arg.lock_seqid);
4542 if (!IS_ERR(task))
4543 rpc_put_task_async(task);
4544 dprintk("%s: cancelling lock!\n", __func__);
4545 } else
4546 nfs_free_seqid(data->arg.lock_seqid);
4547 nfs4_put_lock_state(data->lsp);
4548 put_nfs_open_context(data->ctx);
4549 kfree(data);
4550 dprintk("%s: done!\n", __func__);
4551 }
4552
4553 static const struct rpc_call_ops nfs4_lock_ops = {
4554 .rpc_call_prepare = nfs4_lock_prepare,
4555 .rpc_call_done = nfs4_lock_done,
4556 .rpc_release = nfs4_lock_release,
4557 };
4558
4559 static const struct rpc_call_ops nfs4_recover_lock_ops = {
4560 .rpc_call_prepare = nfs4_recover_lock_prepare,
4561 .rpc_call_done = nfs4_lock_done,
4562 .rpc_release = nfs4_lock_release,
4563 };
4564
4565 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4566 {
4567 switch (error) {
4568 case -NFS4ERR_ADMIN_REVOKED:
4569 case -NFS4ERR_BAD_STATEID:
4570 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4571 if (new_lock_owner != 0 ||
4572 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4573 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4574 break;
4575 case -NFS4ERR_STALE_STATEID:
4576 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4577 case -NFS4ERR_EXPIRED:
4578 nfs4_schedule_lease_recovery(server->nfs_client);
4579 };
4580 }
4581
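/*
 * Issue a LOCK request as an asynchronous RPC task and wait for it.
 * recovery_type distinguishes new locks from reboot reclaims and
 * expired-lease recovery; if the wait is interrupted the request is
 * marked cancelled and the lock is released again on completion.
 */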
4582 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4583 {
4584 struct nfs4_lockdata *data;
4585 struct rpc_task *task;
4586 struct rpc_message msg = {
4587 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4588 .rpc_cred = state->owner->so_cred,
4589 };
4590 struct rpc_task_setup task_setup_data = {
4591 .rpc_client = NFS_CLIENT(state->inode),
4592 .rpc_message = &msg,
4593 .callback_ops = &nfs4_lock_ops,
4594 .workqueue = nfsiod_workqueue,
4595 .flags = RPC_TASK_ASYNC,
4596 };
4597 int ret;
4598
4599 dprintk("%s: begin!\n", __func__);
4600 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4601 fl->fl_u.nfs4_fl.owner,
4602 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4603 if (data == NULL)
4604 return -ENOMEM;
4605 if (IS_SETLKW(cmd))
4606 data->arg.block = 1;
4607 if (recovery_type > NFS_LOCK_NEW) {
4608 if (recovery_type == NFS_LOCK_RECLAIM)
4609 data->arg.reclaim = NFS_LOCK_RECLAIM;
4610 task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4611 }
4612 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4613 msg.rpc_argp = &data->arg;
4614 msg.rpc_resp = &data->res;
4615 task_setup_data.callback_data = data;
4616 task = rpc_run_task(&task_setup_data);
4617 if (IS_ERR(task))
4618 return PTR_ERR(task);
4619 ret = nfs4_wait_for_completion_rpc_task(task);
4620 if (ret == 0) {
4621 ret = data->rpc_status;
4622 if (ret)
4623 nfs4_handle_setlk_error(data->server, data->lsp,
4624 data->arg.new_lock_owner, ret);
4625 } else
4626 data->cancelled = 1;
4627 rpc_put_task(task);
4628 dprintk("%s: done, ret = %d!\n", __func__, ret);
4629 return ret;
4630 }
4631
4632 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4633 {
4634 struct nfs_server *server = NFS_SERVER(state->inode);
4635 struct nfs4_exception exception = {
4636 .inode = state->inode,
4637 };
4638 int err;
4639
4640 do {
4641 /* Cache the lock if possible... */
4642 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4643 return 0;
4644 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4645 if (err != -NFS4ERR_DELAY)
4646 break;
4647 nfs4_handle_exception(server, err, &exception);
4648 } while (exception.retry);
4649 return err;
4650 }
4651
4652 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4653 {
4654 struct nfs_server *server = NFS_SERVER(state->inode);
4655 struct nfs4_exception exception = {
4656 .inode = state->inode,
4657 };
4658 int err;
4659
4660 err = nfs4_set_lock_state(state, request);
4661 if (err != 0)
4662 return err;
4663 do {
4664 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4665 return 0;
4666 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4667 switch (err) {
4668 default:
4669 goto out;
4670 case -NFS4ERR_GRACE:
4671 case -NFS4ERR_DELAY:
4672 nfs4_handle_exception(server, err, &exception);
4673 err = 0;
4674 }
4675 } while (exception.retry);
4676 out:
4677 return err;
4678 }
4679
4680 #if defined(CONFIG_NFS_V4_1)
4681 static int nfs41_check_expired_locks(struct nfs4_state *state)
4682 {
4683 int status, ret = NFS_OK;
4684 struct nfs4_lock_state *lsp;
4685 struct nfs_server *server = NFS_SERVER(state->inode);
4686
4687 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
4688 if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
4689 status = nfs41_test_stateid(server, &lsp->ls_stateid);
4690 if (status != NFS_OK) {
4691 nfs41_free_stateid(server, &lsp->ls_stateid);
4692 lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
4693 ret = status;
4694 }
4695 }
4696 };
4697
4698 return ret;
4699 }
4700
4701 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4702 {
4703 int status = NFS_OK;
4704
4705 if (test_bit(LK_STATE_IN_USE, &state->flags))
4706 status = nfs41_check_expired_locks(state);
4707 if (status == NFS_OK)
4708 return status;
4709 return nfs4_lock_expired(state, request);
4710 }
4711 #endif
4712
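/*
 * Set a lock: probe the local lock state with FL_ACCESS first, cache
 * the lock entirely if a delegation is held, and otherwise send LOCK
 * to the server before recording the result in the VFS.
 */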
4713 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4714 {
4715 struct nfs_inode *nfsi = NFS_I(state->inode);
4716 unsigned char fl_flags = request->fl_flags;
4717 int status = -ENOLCK;
4718
4719 if ((fl_flags & FL_POSIX) &&
4720 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4721 goto out;
4722 /* Is this a delegated open? */
4723 status = nfs4_set_lock_state(state, request);
4724 if (status != 0)
4725 goto out;
4726 request->fl_flags |= FL_ACCESS;
4727 status = do_vfs_lock(request->fl_file, request);
4728 if (status < 0)
4729 goto out;
4730 down_read(&nfsi->rwsem);
4731 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4732 /* Yes: cache locks! */
4733 /* ...but avoid races with delegation recall... */
4734 request->fl_flags = fl_flags & ~FL_SLEEP;
4735 status = do_vfs_lock(request->fl_file, request);
4736 goto out_unlock;
4737 }
4738 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4739 if (status != 0)
4740 goto out_unlock;
4741 /* Note: we always want to sleep here! */
4742 request->fl_flags = fl_flags | FL_SLEEP;
4743 if (do_vfs_lock(request->fl_file, request) < 0)
4744 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
4745 "manager!\n", __func__);
4746 out_unlock:
4747 up_read(&nfsi->rwsem);
4748 out:
4749 request->fl_flags = fl_flags;
4750 return status;
4751 }
4752
4753 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4754 {
4755 struct nfs4_exception exception = {
4756 .state = state,
4757 .inode = state->inode,
4758 };
4759 int err;
4760
4761 do {
4762 err = _nfs4_proc_setlk(state, cmd, request);
4763 if (err == -NFS4ERR_DENIED)
4764 err = -EAGAIN;
4765 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4766 err, &exception);
4767 } while (exception.retry);
4768 return err;
4769 }
4770
4771 static int
4772 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4773 {
4774 struct nfs_open_context *ctx;
4775 struct nfs4_state *state;
4776 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4777 int status;
4778
4779 /* verify open state */
4780 ctx = nfs_file_open_context(filp);
4781 state = ctx->state;
4782
4783 if (request->fl_start < 0 || request->fl_end < 0)
4784 return -EINVAL;
4785
4786 if (IS_GETLK(cmd)) {
4787 if (state != NULL)
4788 return nfs4_proc_getlk(state, F_GETLK, request);
4789 return 0;
4790 }
4791
4792 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4793 return -EINVAL;
4794
4795 if (request->fl_type == F_UNLCK) {
4796 if (state != NULL)
4797 return nfs4_proc_unlck(state, cmd, request);
4798 return 0;
4799 }
4800
4801 if (state == NULL)
4802 return -ENOLCK;
4803 /*
4804 * Don't rely on the VFS having checked the file open mode,
4805 * since it won't do this for flock() locks.
4806 */
4807 switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
4808 case F_RDLCK:
4809 if (!(filp->f_mode & FMODE_READ))
4810 return -EBADF;
4811 break;
4812 case F_WRLCK:
4813 if (!(filp->f_mode & FMODE_WRITE))
4814 return -EBADF;
4815 }
4816
4817 do {
4818 status = nfs4_proc_setlk(state, cmd, request);
4819 if ((status != -EAGAIN) || IS_SETLK(cmd))
4820 break;
4821 timeout = nfs4_set_lock_task_retry(timeout);
4822 status = -ERESTARTSYS;
4823 if (signalled())
4824 break;
4825 } while(status < 0);
4826 return status;
4827 }
4828
4829 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4830 {
4831 struct nfs_server *server = NFS_SERVER(state->inode);
4832 struct nfs4_exception exception = { };
4833 int err;
4834
4835 err = nfs4_set_lock_state(state, fl);
4836 if (err != 0)
4837 goto out;
4838 do {
4839 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4840 switch (err) {
4841 default:
4842 printk(KERN_ERR "NFS: %s: unhandled error "
4843 "%d.\n", __func__, err);
4844 case 0:
4845 case -ESTALE:
4846 goto out;
4847 case -NFS4ERR_EXPIRED:
4848 nfs4_schedule_stateid_recovery(server, state);
4849 case -NFS4ERR_STALE_CLIENTID:
4850 case -NFS4ERR_STALE_STATEID:
4851 nfs4_schedule_lease_recovery(server->nfs_client);
4852 goto out;
4853 case -NFS4ERR_BADSESSION:
4854 case -NFS4ERR_BADSLOT:
4855 case -NFS4ERR_BAD_HIGH_SLOT:
4856 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4857 case -NFS4ERR_DEADSESSION:
4858 nfs4_schedule_session_recovery(server->nfs_client->cl_session);
4859 goto out;
4860 case -ERESTARTSYS:
4861 /*
4862 * The show must go on: exit, but mark the
4863 * stateid as needing recovery.
4864 */
4865 case -NFS4ERR_DELEG_REVOKED:
4866 case -NFS4ERR_ADMIN_REVOKED:
4867 case -NFS4ERR_BAD_STATEID:
4868 case -NFS4ERR_OPENMODE:
4869 nfs4_schedule_stateid_recovery(server, state);
4870 err = 0;
4871 goto out;
4872 case -ENOMEM:
4873 case -NFS4ERR_DENIED:
4874 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
4875 err = 0;
4876 goto out;
4877 case -NFS4ERR_DELAY:
4878 break;
4879 }
4880 err = nfs4_handle_exception(server, err, &exception);
4881 } while (exception.retry);
4882 out:
4883 return err;
4884 }
4885
4886 struct nfs_release_lockowner_data {
4887 struct nfs4_lock_state *lsp;
4888 struct nfs_server *server;
4889 struct nfs_release_lockowner_args args;
4890 };
4891
4892 static void nfs4_release_lockowner_release(void *calldata)
4893 {
4894 struct nfs_release_lockowner_data *data = calldata;
4895 nfs4_free_lock_state(data->server, data->lsp);
4896 kfree(calldata);
4897 }
4898
4899 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
4900 .rpc_release = nfs4_release_lockowner_release,
4901 };
4902
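/*
 * RELEASE_LOCKOWNER only exists in NFSv4.0; callers on other minor
 * versions get -EINVAL.
 */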
4903 int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
4904 {
4905 struct nfs_server *server = lsp->ls_state->owner->so_server;
4906 struct nfs_release_lockowner_data *data;
4907 struct rpc_message msg = {
4908 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
4909 };
4910
4911 if (server->nfs_client->cl_mvops->minor_version != 0)
4912 return -EINVAL;
4913 data = kmalloc(sizeof(*data), GFP_NOFS);
4914 if (!data)
4915 return -ENOMEM;
4916 data->lsp = lsp;
4917 data->server = server;
4918 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
4919 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
4920 data->args.lock_owner.s_dev = server->s_dev;
4921 msg.rpc_argp = &data->args;
4922 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
4923 return 0;
4924 }
4925
4926 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4927
4928 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
4929 const void *buf, size_t buflen,
4930 int flags, int type)
4931 {
4932 if (strcmp(key, "") != 0)
4933 return -EINVAL;
4934
4935 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
4936 }
4937
4938 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
4939 void *buf, size_t buflen, int type)
4940 {
4941 if (strcmp(key, "") != 0)
4942 return -EINVAL;
4943
4944 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
4945 }
4946
4947 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4948 size_t list_len, const char *name,
4949 size_t name_len, int type)
4950 {
4951 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
4952
4953 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4954 return 0;
4955
4956 if (list && len <= list_len)
4957 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
4958 return len;
4959 }
4960
4961 /*
4962 * nfs_fhget will use either the mounted_on_fileid or the fileid
4963 */
4964 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4965 {
4966 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
4967 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
4968 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
4969 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
4970 return;
4971
4972 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4973 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
4974 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4975 fattr->nlink = 2;
4976 }
4977
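/*
 * FS_LOCATIONS: fetch the referral locations for "name" under "dir";
 * the supplied page receives the decoded location data.
 */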
4978 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
4979 const struct qstr *name,
4980 struct nfs4_fs_locations *fs_locations,
4981 struct page *page)
4982 {
4983 struct nfs_server *server = NFS_SERVER(dir);
4984 u32 bitmask[2] = {
4985 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
4986 };
4987 struct nfs4_fs_locations_arg args = {
4988 .dir_fh = NFS_FH(dir),
4989 .name = name,
4990 .page = page,
4991 .bitmask = bitmask,
4992 };
4993 struct nfs4_fs_locations_res res = {
4994 .fs_locations = fs_locations,
4995 };
4996 struct rpc_message msg = {
4997 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
4998 .rpc_argp = &args,
4999 .rpc_resp = &res,
5000 };
5001 int status;
5002
5003 dprintk("%s: start\n", __func__);
5004
5005 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
5006 * is not supported */
5007 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
5008 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
5009 else
5010 bitmask[0] |= FATTR4_WORD0_FILEID;
5011
5012 nfs_fattr_init(&fs_locations->fattr);
5013 fs_locations->server = server;
5014 fs_locations->nlocations = 0;
5015 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
5016 dprintk("%s: returned status = %d\n", __func__, status);
5017 return status;
5018 }
5019
5020 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5021 const struct qstr *name,
5022 struct nfs4_fs_locations *fs_locations,
5023 struct page *page)
5024 {
5025 struct nfs4_exception exception = { };
5026 int err;
5027 do {
5028 err = nfs4_handle_exception(NFS_SERVER(dir),
5029 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5030 &exception);
5031 } while (exception.retry);
5032 return err;
5033 }
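
/*
 * As elsewhere in this file, the _nfs4_proc_*() helper issues a single
 * RPC and the nfs4_proc_*() wrapper retries it through the generic
 * exception handler.  Sketch of the pattern (the helper name is a
 * placeholder):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = nfs4_handle_exception(server,
 *				_nfs4_proc_something(...),
 *				&exception);
 *	} while (exception.retry);
 */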
5034
5035 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
5036 {
5037 int status;
5038 struct nfs4_secinfo_arg args = {
5039 .dir_fh = NFS_FH(dir),
5040 .name = name,
5041 };
5042 struct nfs4_secinfo_res res = {
5043 .flavors = flavors,
5044 };
5045 struct rpc_message msg = {
5046 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
5047 .rpc_argp = &args,
5048 .rpc_resp = &res,
5049 };
5050
5051 dprintk("NFS call secinfo %s\n", name->name);
5052 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5053 dprintk("NFS reply secinfo: %d\n", status);
5054 return status;
5055 }
5056
5057 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5058 struct nfs4_secinfo_flavors *flavors)
5059 {
5060 struct nfs4_exception exception = { };
5061 int err;
5062 do {
5063 err = nfs4_handle_exception(NFS_SERVER(dir),
5064 _nfs4_proc_secinfo(dir, name, flavors),
5065 &exception);
5066 } while (exception.retry);
5067 return err;
5068 }
5069
5070 #ifdef CONFIG_NFS_V4_1
5071 /*
5072 * Check the exchange flags returned by the server for invalid flags, for
5073 * having both PNFS and NON_PNFS flags set, and for not having at least one
5074 * of the NON_PNFS, PNFS, or DS flags set.
5075 */
5076 static int nfs4_check_cl_exchange_flags(u32 flags)
5077 {
5078 if (flags & ~EXCHGID4_FLAG_MASK_R)
5079 goto out_inval;
5080 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5081 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5082 goto out_inval;
5083 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5084 goto out_inval;
5085 return NFS_OK;
5086 out_inval:
5087 return -NFS4ERR_INVAL;
5088 }
5089
5090 static bool
5091 nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
5092 {
5093 if (a->server_scope_sz == b->server_scope_sz &&
5094 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5095 return true;
5096
5097 return false;
5098 }
5099
5100 /*
5101 * nfs4_proc_exchange_id()
5102 *
5103 * Since the clientid has expired, all compounds using sessions
5104 * associated with the stale clientid will be returning
5105 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5106 * be in some phase of session reset.
5107 */
5108 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5109 {
5110 nfs4_verifier verifier;
5111 struct nfs41_exchange_id_args args = {
5112 .verifier = &verifier,
5113 .client = clp,
5114 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5115 };
5116 struct nfs41_exchange_id_res res = {
5117 .client = clp,
5118 };
5119 int status;
5120 struct rpc_message msg = {
5121 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
5122 .rpc_argp = &args,
5123 .rpc_resp = &res,
5124 .rpc_cred = cred,
5125 };
5126
5127 dprintk("--> %s\n", __func__);
5128 BUG_ON(clp == NULL);
5129
5130 nfs4_construct_boot_verifier(clp, &verifier);
5131
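/*
 * Build the client owner id as "<ip>/<nodename>/<auth flavor>"; for
 * example (purely illustrative values) something like
 * "192.0.2.1/client1/1".
 */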
5132 args.id_len = scnprintf(args.id, sizeof(args.id),
5133 "%s/%s/%u",
5134 clp->cl_ipaddr,
5135 clp->cl_rpcclient->cl_nodename,
5136 clp->cl_rpcclient->cl_auth->au_flavor);
5137
5138 res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
5139 if (unlikely(!res.server_scope)) {
5140 status = -ENOMEM;
5141 goto out;
5142 }
5143
5144 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
5145 if (unlikely(!res.impl_id)) {
5146 status = -ENOMEM;
5147 goto out_server_scope;
5148 }
5149
5150 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5151 if (!status)
5152 status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
5153
5154 if (!status) {
5155 /* use the most recent implementation id */
5156 kfree(clp->impl_id);
5157 clp->impl_id = res.impl_id;
5158 } else
5159 kfree(res.impl_id);
5160
5161 if (!status) {
5162 if (clp->server_scope &&
5163 !nfs41_same_server_scope(clp->server_scope,
5164 res.server_scope)) {
5165 dprintk("%s: server_scope mismatch detected\n",
5166 __func__);
5167 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5168 kfree(clp->server_scope);
5169 clp->server_scope = NULL;
5170 }
5171
5172 if (!clp->server_scope) {
5173 clp->server_scope = res.server_scope;
5174 goto out;
5175 }
5176 }
5177
5178 out_server_scope:
5179 kfree(res.server_scope);
5180 out:
5181 if (clp->impl_id)
5182 dprintk("%s: Server Implementation ID: "
5183 "domain: %s, name: %s, date: %llu,%u\n",
5184 __func__, clp->impl_id->domain, clp->impl_id->name,
5185 clp->impl_id->date.seconds,
5186 clp->impl_id->date.nseconds);
5187 dprintk("<-- %s status= %d\n", __func__, status);
5188 return status;
5189 }
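
/*
 * Rough sketch of how these NFSv4.1 setup calls fit together (order as
 * typically driven by the state manager; a summary, not a complete
 * list):
 *
 *	nfs4_proc_exchange_id(clp, cred);
 *	nfs4_proc_create_session(clp);
 *		-> _nfs4_proc_create_session()
 *		-> nfs4_setup_session_slot_tables()
 *	nfs4_proc_get_lease_time(clp, fsinfo);
 */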
5190
5191 struct nfs4_get_lease_time_data {
5192 struct nfs4_get_lease_time_args *args;
5193 struct nfs4_get_lease_time_res *res;
5194 struct nfs_client *clp;
5195 };
5196
5197 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5198 void *calldata)
5199 {
5200 int ret;
5201 struct nfs4_get_lease_time_data *data =
5202 (struct nfs4_get_lease_time_data *)calldata;
5203
5204 dprintk("--> %s\n", __func__);
5205 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5206 /* just setup sequence, do not trigger session recovery
5207 since we're invoked within one */
5208 ret = nfs41_setup_sequence(data->clp->cl_session,
5209 &data->args->la_seq_args,
5210 &data->res->lr_seq_res, task);
5211
5212 BUG_ON(ret == -EAGAIN);
5213 rpc_call_start(task);
5214 dprintk("<-- %s\n", __func__);
5215 }
5216
5217 /*
5218 * Called from nfs4_state_manager thread for session setup, so don't recover
5219 * from sequence operation or clientid errors.
5220 */
5221 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5222 {
5223 struct nfs4_get_lease_time_data *data =
5224 (struct nfs4_get_lease_time_data *)calldata;
5225
5226 dprintk("--> %s\n", __func__);
5227 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5228 return;
5229 switch (task->tk_status) {
5230 case -NFS4ERR_DELAY:
5231 case -NFS4ERR_GRACE:
5232 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5233 rpc_delay(task, NFS4_POLL_RETRY_MIN);
5234 task->tk_status = 0;
5235 /* fall through */
5236 case -NFS4ERR_RETRY_UNCACHED_REP:
5237 rpc_restart_call_prepare(task);
5238 return;
5239 }
5240 dprintk("<-- %s\n", __func__);
5241 }
5242
5243 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
5244 .rpc_call_prepare = nfs4_get_lease_time_prepare,
5245 .rpc_call_done = nfs4_get_lease_time_done,
5246 };
5247
5248 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5249 {
5250 struct rpc_task *task;
5251 struct nfs4_get_lease_time_args args;
5252 struct nfs4_get_lease_time_res res = {
5253 .lr_fsinfo = fsinfo,
5254 };
5255 struct nfs4_get_lease_time_data data = {
5256 .args = &args,
5257 .res = &res,
5258 .clp = clp,
5259 };
5260 struct rpc_message msg = {
5261 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5262 .rpc_argp = &args,
5263 .rpc_resp = &res,
5264 };
5265 struct rpc_task_setup task_setup = {
5266 .rpc_client = clp->cl_rpcclient,
5267 .rpc_message = &msg,
5268 .callback_ops = &nfs4_get_lease_time_ops,
5269 .callback_data = &data,
5270 .flags = RPC_TASK_TIMEOUT,
5271 };
5272 int status;
5273
5274 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5275 dprintk("--> %s\n", __func__);
5276 task = rpc_run_task(&task_setup);
5277
5278 if (IS_ERR(task))
5279 status = PTR_ERR(task);
5280 else {
5281 status = task->tk_status;
5282 rpc_put_task(task);
5283 }
5284 dprintk("<-- %s return %d\n", __func__, status);
5285
5286 return status;
5287 }
5288
5289 static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
5290 {
5291 return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
5292 }
5293
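/*
 * Swap a freshly allocated slot array into the table (when one is
 * supplied), reset highest_used_slotid, seed every slot's seq_nr with
 * ivalue, and free the old array after dropping the slot table lock.
 */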
5294 static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
5295 struct nfs4_slot *new,
5296 u32 max_slots,
5297 u32 ivalue)
5298 {
5299 struct nfs4_slot *old = NULL;
5300 u32 i;
5301
5302 spin_lock(&tbl->slot_tbl_lock);
5303 if (new) {
5304 old = tbl->slots;
5305 tbl->slots = new;
5306 tbl->max_slots = max_slots;
5307 }
5308 tbl->highest_used_slotid = -1; /* no slot is currently used */
5309 for (i = 0; i < tbl->max_slots; i++)
5310 tbl->slots[i].seq_nr = ivalue;
5311 spin_unlock(&tbl->slot_tbl_lock);
5312 kfree(old);
5313 }
5314
5315 /*
5316 * (re)Initialise a slot table
5317 */
5318 static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
5319 u32 ivalue)
5320 {
5321 struct nfs4_slot *new = NULL;
5322 int ret = -ENOMEM;
5323
5324 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
5325 max_reqs, tbl->max_slots);
5326
5327 /* Does the newly negotiated max_reqs match the existing slot table? */
5328 if (max_reqs != tbl->max_slots) {
5329 new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
5330 if (!new)
5331 goto out;
5332 }
5333 ret = 0;
5334
5335 nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
5336 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
5337 tbl, tbl->slots, tbl->max_slots);
5338 out:
5339 dprintk("<-- %s: return %d\n", __func__, ret);
5340 return ret;
5341 }
5342
5343 /* Destroy the slot table */
5344 static void nfs4_destroy_slot_tables(struct nfs4_session *session)
5345 {
5346 if (session->fc_slot_table.slots != NULL) {
5347 kfree(session->fc_slot_table.slots);
5348 session->fc_slot_table.slots = NULL;
5349 }
5350 if (session->bc_slot_table.slots != NULL) {
5351 kfree(session->bc_slot_table.slots);
5352 session->bc_slot_table.slots = NULL;
5353 }
5354 return;
5355 }
5356
5357 /*
5358 * Initialize or reset the forechannel and backchannel tables
5359 */
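/*
 * Fore channel slots start with seq_nr 1, back channel slots with 0;
 * if the back channel table cannot be (re)allocated, both tables are
 * torn down so the two channels stay consistent.
 */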
5360 static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
5361 {
5362 struct nfs4_slot_table *tbl;
5363 int status;
5364
5365 dprintk("--> %s\n", __func__);
5366 /* Fore channel */
5367 tbl = &ses->fc_slot_table;
5368 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
5369 if (status) /* -ENOMEM */
5370 return status;
5371 /* Back channel */
5372 tbl = &ses->bc_slot_table;
5373 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
5374 if (status && tbl->slots == NULL)
5375 /* Fore and back channel share a connection so get
5376 * both slot tables or neither */
5377 nfs4_destroy_slot_tables(ses);
5378 return status;
5379 }
5380
5381 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5382 {
5383 struct nfs4_session *session;
5384 struct nfs4_slot_table *tbl;
5385
5386 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5387 if (!session)
5388 return NULL;
5389
5390 tbl = &session->fc_slot_table;
5391 tbl->highest_used_slotid = NFS4_NO_SLOT;
5392 spin_lock_init(&tbl->slot_tbl_lock);
5393 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5394 init_completion(&tbl->complete);
5395
5396 tbl = &session->bc_slot_table;
5397 tbl->highest_used_slotid = NFS4_NO_SLOT;
5398 spin_lock_init(&tbl->slot_tbl_lock);
5399 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5400 init_completion(&tbl->complete);
5401
5402 session->session_state = 1<<NFS4_SESSION_INITING;
5403
5404 session->clp = clp;
5405 return session;
5406 }
5407
5408 void nfs4_destroy_session(struct nfs4_session *session)
5409 {
5410 struct rpc_xprt *xprt;
5411
5412 nfs4_proc_destroy_session(session);
5413
5414 rcu_read_lock();
5415 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
5416 rcu_read_unlock();
5417 dprintk("%s Destroy backchannel for xprt %p\n",
5418 __func__, xprt);
5419 xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
5420 nfs4_destroy_slot_tables(session);
5421 kfree(session);
5422 }
5423
5424 /*
5425 * Initialize the values to be used by the client in CREATE_SESSION
5426 * If nfs4_init_session set the fore channel request and response sizes,
5427 * use them.
5428 *
5429 * Set the back channel max_resp_sz_cached to zero to force the client to
5430 * always set csa_cachethis to FALSE because the current implementation
5431 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5432 */
5433 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5434 {
5435 struct nfs4_session *session = args->client->cl_session;
5436 unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
5437 mxresp_sz = session->fc_attrs.max_resp_sz;
5438
5439 if (mxrqst_sz == 0)
5440 mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5441 if (mxresp_sz == 0)
5442 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5443 /* Fore channel attributes */
5444 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5445 args->fc_attrs.max_resp_sz = mxresp_sz;
5446 args->fc_attrs.max_ops = NFS4_MAX_OPS;
5447 args->fc_attrs.max_reqs = max_session_slots;
5448
5449 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5450 "max_ops=%u max_reqs=%u\n",
5451 __func__,
5452 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5453 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5454
5455 /* Back channel attributes */
5456 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5457 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5458 args->bc_attrs.max_resp_sz_cached = 0;
5459 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5460 args->bc_attrs.max_reqs = 1;
5461
5462 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5463 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5464 __func__,
5465 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5466 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5467 args->bc_attrs.max_reqs);
5468 }
5469
5470 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5471 {
5472 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5473 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5474
5475 if (rcvd->max_resp_sz > sent->max_resp_sz)
5476 return -EINVAL;
5477 /*
5478 * Our requested max_ops is the minimum we need; we're not
5479 * prepared to break up compounds into smaller pieces than that.
5480 * So, no point even trying to continue if the server won't
5481 * cooperate:
5482 */
5483 if (rcvd->max_ops < sent->max_ops)
5484 return -EINVAL;
5485 if (rcvd->max_reqs == 0)
5486 return -EINVAL;
5487 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5488 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5489 return 0;
5490 }
5491
5492 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5493 {
5494 struct nfs4_channel_attrs *sent = &args->bc_attrs;
5495 struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5496
5497 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5498 return -EINVAL;
5499 if (rcvd->max_resp_sz < sent->max_resp_sz)
5500 return -EINVAL;
5501 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5502 return -EINVAL;
5503 /* These would render the backchannel useless: */
5504 if (rcvd->max_ops != sent->max_ops)
5505 return -EINVAL;
5506 if (rcvd->max_reqs != sent->max_reqs)
5507 return -EINVAL;
5508 return 0;
5509 }
5510
5511 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5512 struct nfs4_session *session)
5513 {
5514 int ret;
5515
5516 ret = nfs4_verify_fore_channel_attrs(args, session);
5517 if (ret)
5518 return ret;
5519 return nfs4_verify_back_channel_attrs(args, session);
5520 }
5521
5522 static int _nfs4_proc_create_session(struct nfs_client *clp)
5523 {
5524 struct nfs4_session *session = clp->cl_session;
5525 struct nfs41_create_session_args args = {
5526 .client = clp,
5527 .cb_program = NFS4_CALLBACK,
5528 };
5529 struct nfs41_create_session_res res = {
5530 .client = clp,
5531 };
5532 struct rpc_message msg = {
5533 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5534 .rpc_argp = &args,
5535 .rpc_resp = &res,
5536 };
5537 int status;
5538
5539 nfs4_init_channel_attrs(&args);
5540 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5541
5542 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5543
5544 if (!status)
5545 /* Verify the session's negotiated channel_attrs values */
5546 status = nfs4_verify_channel_attrs(&args, session);
5547 if (!status) {
5548 /* Increment the clientid slot sequence id */
5549 clp->cl_seqid++;
5550 }
5551
5552 return status;
5553 }
5554
5555 /*
5556 * Issues a CREATE_SESSION operation to the server.
5557 * It is the responsibility of the caller to verify the session is
5558 * expired before calling this routine.
5559 */
5560 int nfs4_proc_create_session(struct nfs_client *clp)
5561 {
5562 int status;
5563 unsigned *ptr;
5564 struct nfs4_session *session = clp->cl_session;
5565
5566 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5567
5568 status = _nfs4_proc_create_session(clp);
5569 if (status)
5570 goto out;
5571
5572 /* Init or reset the session slot tables */
5573 status = nfs4_setup_session_slot_tables(session);
5574 dprintk("slot table setup returned %d\n", status);
5575 if (status)
5576 goto out;
5577
5578 ptr = (unsigned *)&session->sess_id.data[0];
5579 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
5580 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5581 out:
5582 dprintk("<-- %s\n", __func__);
5583 return status;
5584 }
5585
5586 /*
5587 * Issue the over-the-wire RPC DESTROY_SESSION.
5588 * The caller must serialize access to this routine.
5589 */
5590 int nfs4_proc_destroy_session(struct nfs4_session *session)
5591 {
5592 int status = 0;
5593 struct rpc_message msg;
5594
5595 dprintk("--> nfs4_proc_destroy_session\n");
5596
5597 /* session is still being setup */
5598 if (session->clp->cl_cons_state != NFS_CS_READY)
5599 return status;
5600
5601 msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
5602 msg.rpc_argp = session;
5603 msg.rpc_resp = NULL;
5604 msg.rpc_cred = NULL;
5605 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5606
5607 if (status)
5608 printk(KERN_WARNING
5609 "NFS: Got error %d from the server on DESTROY_SESSION. "
5610 "Session has been destroyed regardless...\n", status);
5611
5612 dprintk("<-- nfs4_proc_destroy_session\n");
5613 return status;
5614 }
5615
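/*
 * First-time session initialisation for a mounted server: derive the
 * fore channel max_rqst_sz/max_resp_sz from the mount's wsize/rsize
 * plus the NFSv4.1 write/read overheads, then drive lease recovery so
 * the session gets established.
 */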
5616 int nfs4_init_session(struct nfs_server *server)
5617 {
5618 struct nfs_client *clp = server->nfs_client;
5619 struct nfs4_session *session;
5620 unsigned int rsize, wsize;
5621 int ret;
5622
5623 if (!nfs4_has_session(clp))
5624 return 0;
5625
5626 session = clp->cl_session;
5627 if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5628 return 0;
5629
5630 rsize = server->rsize;
5631 if (rsize == 0)
5632 rsize = NFS_MAX_FILE_IO_SIZE;
5633 wsize = server->wsize;
5634 if (wsize == 0)
5635 wsize = NFS_MAX_FILE_IO_SIZE;
5636
5637 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5638 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5639
5640 ret = nfs4_recover_expired_lease(server);
5641 if (!ret)
5642 ret = nfs4_check_client_ready(clp);
5643 return ret;
5644 }
5645
5646 int nfs4_init_ds_session(struct nfs_client *clp)
5647 {
5648 struct nfs4_session *session = clp->cl_session;
5649 int ret;
5650
5651 if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5652 return 0;
5653
5654 ret = nfs4_client_recover_expired_lease(clp);
5655 if (!ret)
5656 /* Test for the DS role */
5657 if (!is_ds_client(clp))
5658 ret = -ENODEV;
5659 if (!ret)
5660 ret = nfs4_check_client_ready(clp);
5661 return ret;
5662
5663 }
5664 EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
5665
5666
5667 /*
5668 * Renew the cl_session lease.
5669 */
5670 struct nfs4_sequence_data {
5671 struct nfs_client *clp;
5672 struct nfs4_sequence_args args;
5673 struct nfs4_sequence_res res;
5674 };
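
/*
 * Two SEQUENCE-based renewal paths are provided below: the async one
 * (nfs41_proc_async_sequence) is scheduled on renewal timeouts, while
 * nfs4_proc_sequence runs a privileged, synchronous SEQUENCE and feeds
 * any sr_status_flags back into state recovery.  Both are wired up in
 * nfs41_state_renewal_ops near the bottom of this file.
 */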
5675
5676 static void nfs41_sequence_release(void *data)
5677 {
5678 struct nfs4_sequence_data *calldata = data;
5679 struct nfs_client *clp = calldata->clp;
5680
5681 if (atomic_read(&clp->cl_count) > 1)
5682 nfs4_schedule_state_renewal(clp);
5683 nfs_put_client(clp);
5684 kfree(calldata);
5685 }
5686
5687 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5688 {
5689 switch(task->tk_status) {
5690 case -NFS4ERR_DELAY:
5691 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5692 return -EAGAIN;
5693 default:
5694 nfs4_schedule_lease_recovery(clp);
5695 }
5696 return 0;
5697 }
5698
5699 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5700 {
5701 struct nfs4_sequence_data *calldata = data;
5702 struct nfs_client *clp = calldata->clp;
5703
5704 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5705 return;
5706
5707 if (task->tk_status < 0) {
5708 dprintk("%s ERROR %d\n", __func__, task->tk_status);
5709 if (atomic_read(&clp->cl_count) == 1)
5710 goto out;
5711
5712 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5713 rpc_restart_call_prepare(task);
5714 return;
5715 }
5716 }
5717 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5718 out:
5719 dprintk("<-- %s\n", __func__);
5720 }
5721
5722 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5723 {
5724 struct nfs4_sequence_data *calldata = data;
5725 struct nfs_client *clp = calldata->clp;
5726 struct nfs4_sequence_args *args;
5727 struct nfs4_sequence_res *res;
5728
5729 args = task->tk_msg.rpc_argp;
5730 res = task->tk_msg.rpc_resp;
5731
5732 if (nfs41_setup_sequence(clp->cl_session, args, res, task))
5733 return;
5734 rpc_call_start(task);
5735 }
5736
5737 static void nfs41_sequence_prepare_privileged(struct rpc_task *task, void *data)
5738 {
5739 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5740 nfs41_sequence_prepare(task, data);
5741 }
5742
5743 static const struct rpc_call_ops nfs41_sequence_ops = {
5744 .rpc_call_done = nfs41_sequence_call_done,
5745 .rpc_call_prepare = nfs41_sequence_prepare,
5746 .rpc_release = nfs41_sequence_release,
5747 };
5748
5749 static const struct rpc_call_ops nfs41_sequence_privileged_ops = {
5750 .rpc_call_done = nfs41_sequence_call_done,
5751 .rpc_call_prepare = nfs41_sequence_prepare_privileged,
5752 .rpc_release = nfs41_sequence_release,
5753 };
5754
5755 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred,
5756 const struct rpc_call_ops *seq_ops)
5757 {
5758 struct nfs4_sequence_data *calldata;
5759 struct rpc_message msg = {
5760 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5761 .rpc_cred = cred,
5762 };
5763 struct rpc_task_setup task_setup_data = {
5764 .rpc_client = clp->cl_rpcclient,
5765 .rpc_message = &msg,
5766 .callback_ops = seq_ops,
5767 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5768 };
5769
5770 if (!atomic_inc_not_zero(&clp->cl_count))
5771 return ERR_PTR(-EIO);
5772 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5773 if (calldata == NULL) {
5774 nfs_put_client(clp);
5775 return ERR_PTR(-ENOMEM);
5776 }
5777 nfs41_init_sequence(&calldata->args, &calldata->res, 0);
5778 msg.rpc_argp = &calldata->args;
5779 msg.rpc_resp = &calldata->res;
5780 calldata->clp = clp;
5781 task_setup_data.callback_data = calldata;
5782
5783 return rpc_run_task(&task_setup_data);
5784 }
5785
5786 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5787 {
5788 struct rpc_task *task;
5789 int ret = 0;
5790
5791 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
5792 return 0;
5793 task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops);
5794 if (IS_ERR(task))
5795 ret = PTR_ERR(task);
5796 else
5797 rpc_put_task_async(task);
5798 dprintk("<-- %s status=%d\n", __func__, ret);
5799 return ret;
5800 }
5801
5802 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5803 {
5804 struct rpc_task *task;
5805 int ret;
5806
5807 task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_privileged_ops);
5808 if (IS_ERR(task)) {
5809 ret = PTR_ERR(task);
5810 goto out;
5811 }
5812 ret = rpc_wait_for_completion_task(task);
5813 if (!ret) {
5814 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
5815
5816 if (task->tk_status == 0)
5817 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
5818 ret = task->tk_status;
5819 }
5820 rpc_put_task(task);
5821 out:
5822 dprintk("<-- %s status=%d\n", __func__, ret);
5823 return ret;
5824 }
5825
5826 struct nfs4_reclaim_complete_data {
5827 struct nfs_client *clp;
5828 struct nfs41_reclaim_complete_args arg;
5829 struct nfs41_reclaim_complete_res res;
5830 };
5831
5832 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
5833 {
5834 struct nfs4_reclaim_complete_data *calldata = data;
5835
5836 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5837 if (nfs41_setup_sequence(calldata->clp->cl_session,
5838 &calldata->arg.seq_args,
5839 &calldata->res.seq_res, task))
5840 return;
5841
5842 rpc_call_start(task);
5843 }
5844
5845 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5846 {
5847 switch(task->tk_status) {
5848 case 0:
5849 case -NFS4ERR_COMPLETE_ALREADY:
5850 case -NFS4ERR_WRONG_CRED: /* What to do here? */
5851 break;
5852 case -NFS4ERR_DELAY:
5853 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5854 /* fall through */
5855 case -NFS4ERR_RETRY_UNCACHED_REP:
5856 return -EAGAIN;
5857 default:
5858 nfs4_schedule_lease_recovery(clp);
5859 }
5860 return 0;
5861 }
5862
5863 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
5864 {
5865 struct nfs4_reclaim_complete_data *calldata = data;
5866 struct nfs_client *clp = calldata->clp;
5867 struct nfs4_sequence_res *res = &calldata->res.seq_res;
5868
5869 dprintk("--> %s\n", __func__);
5870 if (!nfs41_sequence_done(task, res))
5871 return;
5872
5873 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
5874 rpc_restart_call_prepare(task);
5875 return;
5876 }
5877 dprintk("<-- %s\n", __func__);
5878 }
5879
5880 static void nfs4_free_reclaim_complete_data(void *data)
5881 {
5882 struct nfs4_reclaim_complete_data *calldata = data;
5883
5884 kfree(calldata);
5885 }
5886
5887 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
5888 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
5889 .rpc_call_done = nfs4_reclaim_complete_done,
5890 .rpc_release = nfs4_free_reclaim_complete_data,
5891 };
5892
5893 /*
5894 * Issue a global reclaim complete.
5895 */
5896 static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
5897 {
5898 struct nfs4_reclaim_complete_data *calldata;
5899 struct rpc_task *task;
5900 struct rpc_message msg = {
5901 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
5902 };
5903 struct rpc_task_setup task_setup_data = {
5904 .rpc_client = clp->cl_rpcclient,
5905 .rpc_message = &msg,
5906 .callback_ops = &nfs4_reclaim_complete_call_ops,
5907 .flags = RPC_TASK_ASYNC,
5908 };
5909 int status = -ENOMEM;
5910
5911 dprintk("--> %s\n", __func__);
5912 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5913 if (calldata == NULL)
5914 goto out;
5915 calldata->clp = clp;
5916 calldata->arg.one_fs = 0;
5917
5918 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
5919 msg.rpc_argp = &calldata->arg;
5920 msg.rpc_resp = &calldata->res;
5921 task_setup_data.callback_data = calldata;
5922 task = rpc_run_task(&task_setup_data);
5923 if (IS_ERR(task)) {
5924 status = PTR_ERR(task);
5925 goto out;
5926 }
5927 status = nfs4_wait_for_completion_rpc_task(task);
5928 if (status == 0)
5929 status = task->tk_status;
5930 rpc_put_task(task);
5931 return 0;
5932 out:
5933 dprintk("<-- %s status=%d\n", __func__, status);
5934 return status;
5935 }
5936
5937 static void
5938 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
5939 {
5940 struct nfs4_layoutget *lgp = calldata;
5941 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5942
5943 dprintk("--> %s\n", __func__);
5944 /* Note there is a race here, where a CB_LAYOUTRECALL can come in
5945 * right now covering the LAYOUTGET we are about to send.
5946 * However, that is not so catastrophic, and there seems
5947 * to be no way to prevent it completely.
5948 */
5949 if (nfs4_setup_sequence(server, &lgp->args.seq_args,
5950 &lgp->res.seq_res, task))
5951 return;
5952 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
5953 NFS_I(lgp->args.inode)->layout,
5954 lgp->args.ctx->state)) {
5955 rpc_exit(task, NFS4_OK);
5956 return;
5957 }
5958 rpc_call_start(task);
5959 }
5960
5961 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
5962 {
5963 struct nfs4_layoutget *lgp = calldata;
5964 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5965
5966 dprintk("--> %s\n", __func__);
5967
5968 if (!nfs4_sequence_done(task, &lgp->res.seq_res))
5969 return;
5970
5971 switch (task->tk_status) {
5972 case 0:
5973 break;
5974 case -NFS4ERR_LAYOUTTRYLATER:
5975 case -NFS4ERR_RECALLCONFLICT:
5976 task->tk_status = -NFS4ERR_DELAY;
5977 /* Fall through */
5978 default:
5979 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
5980 rpc_restart_call_prepare(task);
5981 return;
5982 }
5983 }
5984 dprintk("<-- %s\n", __func__);
5985 }
5986
5987 static size_t max_response_pages(struct nfs_server *server)
5988 {
5989 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
5990 return nfs_page_array_len(0, max_resp_sz);
5991 }
5992
5993 static void nfs4_free_pages(struct page **pages, size_t size)
5994 {
5995 int i;
5996
5997 if (!pages)
5998 return;
5999
6000 for (i = 0; i < size; i++) {
6001 if (!pages[i])
6002 break;
6003 __free_page(pages[i]);
6004 }
6005 kfree(pages);
6006 }
6007
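/*
 * Allocate an array of @size pages for the LAYOUTGET reply; on any
 * allocation failure the partially filled array is released again via
 * nfs4_free_pages(), which stops at the first NULL entry.
 */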
6008 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
6009 {
6010 struct page **pages;
6011 int i;
6012
6013 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
6014 if (!pages) {
6015 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
6016 return NULL;
6017 }
6018
6019 for (i = 0; i < size; i++) {
6020 pages[i] = alloc_page(gfp_flags);
6021 if (!pages[i]) {
6022 dprintk("%s: failed to allocate page\n", __func__);
6023 nfs4_free_pages(pages, size);
6024 return NULL;
6025 }
6026 }
6027
6028 return pages;
6029 }
6030
6031 static void nfs4_layoutget_release(void *calldata)
6032 {
6033 struct nfs4_layoutget *lgp = calldata;
6034 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6035 size_t max_pages = max_response_pages(server);
6036
6037 dprintk("--> %s\n", __func__);
6038 nfs4_free_pages(lgp->args.layout.pages, max_pages);
6039 put_nfs_open_context(lgp->args.ctx);
6040 kfree(calldata);
6041 dprintk("<-- %s\n", __func__);
6042 }
6043
6044 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6045 .rpc_call_prepare = nfs4_layoutget_prepare,
6046 .rpc_call_done = nfs4_layoutget_done,
6047 .rpc_release = nfs4_layoutget_release,
6048 };
6049
6050 int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
6051 {
6052 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6053 size_t max_pages = max_response_pages(server);
6054 struct rpc_task *task;
6055 struct rpc_message msg = {
6056 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
6057 .rpc_argp = &lgp->args,
6058 .rpc_resp = &lgp->res,
6059 };
6060 struct rpc_task_setup task_setup_data = {
6061 .rpc_client = server->client,
6062 .rpc_message = &msg,
6063 .callback_ops = &nfs4_layoutget_call_ops,
6064 .callback_data = lgp,
6065 .flags = RPC_TASK_ASYNC,
6066 };
6067 int status = 0;
6068
6069 dprintk("--> %s\n", __func__);
6070
6071 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
6072 if (!lgp->args.layout.pages) {
6073 nfs4_layoutget_release(lgp);
6074 return -ENOMEM;
6075 }
6076 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
6077
6078 lgp->res.layoutp = &lgp->args.layout;
6079 lgp->res.seq_res.sr_slot = NULL;
6080 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6081 task = rpc_run_task(&task_setup_data);
6082 if (IS_ERR(task))
6083 return PTR_ERR(task);
6084 status = nfs4_wait_for_completion_rpc_task(task);
6085 if (status == 0)
6086 status = task->tk_status;
6087 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
6088 if (status == 0 && lgp->res.layoutp->len)
6089 status = pnfs_layout_process(lgp);
6090 rpc_put_task(task);
6091 dprintk("<-- %s status=%d\n", __func__, status);
6092 return status;
6093 }
6094
6095 static void
6096 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
6097 {
6098 struct nfs4_layoutreturn *lrp = calldata;
6099
6100 dprintk("--> %s\n", __func__);
6101 if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
6102 &lrp->res.seq_res, task))
6103 return;
6104 rpc_call_start(task);
6105 }
6106
6107 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6108 {
6109 struct nfs4_layoutreturn *lrp = calldata;
6110 struct nfs_server *server;
6111 struct pnfs_layout_hdr *lo = lrp->args.layout;
6112
6113 dprintk("--> %s\n", __func__);
6114
6115 if (!nfs4_sequence_done(task, &lrp->res.seq_res))
6116 return;
6117
6118 server = NFS_SERVER(lrp->args.inode);
6119 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6120 rpc_restart_call_prepare(task);
6121 return;
6122 }
6123 spin_lock(&lo->plh_inode->i_lock);
6124 if (task->tk_status == 0 && lrp->res.lrs_present)
6125 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6126 lo->plh_block_lgets--;
6127 spin_unlock(&lo->plh_inode->i_lock);
6128 dprintk("<-- %s\n", __func__);
6129 }
6130
6131 static void nfs4_layoutreturn_release(void *calldata)
6132 {
6133 struct nfs4_layoutreturn *lrp = calldata;
6134
6135 dprintk("--> %s\n", __func__);
6136 put_layout_hdr(lrp->args.layout);
6137 kfree(calldata);
6138 dprintk("<-- %s\n", __func__);
6139 }
6140
6141 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
6142 .rpc_call_prepare = nfs4_layoutreturn_prepare,
6143 .rpc_call_done = nfs4_layoutreturn_done,
6144 .rpc_release = nfs4_layoutreturn_release,
6145 };
6146
6147 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
6148 {
6149 struct rpc_task *task;
6150 struct rpc_message msg = {
6151 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
6152 .rpc_argp = &lrp->args,
6153 .rpc_resp = &lrp->res,
6154 };
6155 struct rpc_task_setup task_setup_data = {
6156 .rpc_client = lrp->clp->cl_rpcclient,
6157 .rpc_message = &msg,
6158 .callback_ops = &nfs4_layoutreturn_call_ops,
6159 .callback_data = lrp,
6160 };
6161 int status;
6162
6163 dprintk("--> %s\n", __func__);
6164 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
6165 task = rpc_run_task(&task_setup_data);
6166 if (IS_ERR(task))
6167 return PTR_ERR(task);
6168 status = task->tk_status;
6169 dprintk("<-- %s status=%d\n", __func__, status);
6170 rpc_put_task(task);
6171 return status;
6172 }
6173
6174 /*
6175 * Retrieve the list of Data Server devices from the MDS.
6176 */
6177 static int _nfs4_getdevicelist(struct nfs_server *server,
6178 const struct nfs_fh *fh,
6179 struct pnfs_devicelist *devlist)
6180 {
6181 struct nfs4_getdevicelist_args args = {
6182 .fh = fh,
6183 .layoutclass = server->pnfs_curr_ld->id,
6184 };
6185 struct nfs4_getdevicelist_res res = {
6186 .devlist = devlist,
6187 };
6188 struct rpc_message msg = {
6189 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
6190 .rpc_argp = &args,
6191 .rpc_resp = &res,
6192 };
6193 int status;
6194
6195 dprintk("--> %s\n", __func__);
6196 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
6197 &res.seq_res, 0);
6198 dprintk("<-- %s status=%d\n", __func__, status);
6199 return status;
6200 }
6201
6202 int nfs4_proc_getdevicelist(struct nfs_server *server,
6203 const struct nfs_fh *fh,
6204 struct pnfs_devicelist *devlist)
6205 {
6206 struct nfs4_exception exception = { };
6207 int err;
6208
6209 do {
6210 err = nfs4_handle_exception(server,
6211 _nfs4_getdevicelist(server, fh, devlist),
6212 &exception);
6213 } while (exception.retry);
6214
6215 dprintk("%s: err=%d, num_devs=%u\n", __func__,
6216 err, devlist->num_devs);
6217
6218 return err;
6219 }
6220 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
6221
6222 static int
6223 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6224 {
6225 struct nfs4_getdeviceinfo_args args = {
6226 .pdev = pdev,
6227 };
6228 struct nfs4_getdeviceinfo_res res = {
6229 .pdev = pdev,
6230 };
6231 struct rpc_message msg = {
6232 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6233 .rpc_argp = &args,
6234 .rpc_resp = &res,
6235 };
6236 int status;
6237
6238 dprintk("--> %s\n", __func__);
6239 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6240 dprintk("<-- %s status=%d\n", __func__, status);
6241
6242 return status;
6243 }
6244
6245 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6246 {
6247 struct nfs4_exception exception = { };
6248 int err;
6249
6250 do {
6251 err = nfs4_handle_exception(server,
6252 _nfs4_proc_getdeviceinfo(server, pdev),
6253 &exception);
6254 } while (exception.retry);
6255 return err;
6256 }
6257 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6258
6259 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6260 {
6261 struct nfs4_layoutcommit_data *data = calldata;
6262 struct nfs_server *server = NFS_SERVER(data->args.inode);
6263
6264 if (nfs4_setup_sequence(server, &data->args.seq_args,
6265 &data->res.seq_res, task))
6266 return;
6267 rpc_call_start(task);
6268 }
6269
6270 static void
6271 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6272 {
6273 struct nfs4_layoutcommit_data *data = calldata;
6274 struct nfs_server *server = NFS_SERVER(data->args.inode);
6275
6276 if (!nfs4_sequence_done(task, &data->res.seq_res))
6277 return;
6278
6279 switch (task->tk_status) { /* Just ignore these failures */
6280 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
6281 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
6282 case -NFS4ERR_BADLAYOUT: /* no layout */
6283 case -NFS4ERR_GRACE: /* loca_reclaim always false */
6284 task->tk_status = 0;
6285 break;
6286 case 0:
6287 nfs_post_op_update_inode_force_wcc(data->args.inode,
6288 data->res.fattr);
6289 break;
6290 default:
6291 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6292 rpc_restart_call_prepare(task);
6293 return;
6294 }
6295 }
6296 }
6297
6298 static void nfs4_layoutcommit_release(void *calldata)
6299 {
6300 struct nfs4_layoutcommit_data *data = calldata;
6301
6302 pnfs_cleanup_layoutcommit(data);
6303 put_rpccred(data->cred);
6304 kfree(data);
6305 }
6306
6307 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6308 .rpc_call_prepare = nfs4_layoutcommit_prepare,
6309 .rpc_call_done = nfs4_layoutcommit_done,
6310 .rpc_release = nfs4_layoutcommit_release,
6311 };
6312
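/*
 * LAYOUTCOMMIT: when @sync is false the RPC is fired off asynchronously
 * and its status is not waited for; when @sync is true the caller
 * blocks for completion and gets the task's status back.
 */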
6313 int
6314 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6315 {
6316 struct rpc_message msg = {
6317 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6318 .rpc_argp = &data->args,
6319 .rpc_resp = &data->res,
6320 .rpc_cred = data->cred,
6321 };
6322 struct rpc_task_setup task_setup_data = {
6323 .task = &data->task,
6324 .rpc_client = NFS_CLIENT(data->args.inode),
6325 .rpc_message = &msg,
6326 .callback_ops = &nfs4_layoutcommit_ops,
6327 .callback_data = data,
6328 .flags = RPC_TASK_ASYNC,
6329 };
6330 struct rpc_task *task;
6331 int status = 0;
6332
6333 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6334 "lbw: %llu inode %lu\n",
6335 data->task.tk_pid, sync,
6336 data->args.lastbytewritten,
6337 data->args.inode->i_ino);
6338
6339 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6340 task = rpc_run_task(&task_setup_data);
6341 if (IS_ERR(task))
6342 return PTR_ERR(task);
6343 if (sync == false)
6344 goto out;
6345 status = nfs4_wait_for_completion_rpc_task(task);
6346 if (status != 0)
6347 goto out;
6348 status = task->tk_status;
6349 out:
6350 dprintk("%s: status %d\n", __func__, status);
6351 rpc_put_task(task);
6352 return status;
6353 }
6354
6355 static int
6356 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6357 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6358 {
6359 struct nfs41_secinfo_no_name_args args = {
6360 .style = SECINFO_STYLE_CURRENT_FH,
6361 };
6362 struct nfs4_secinfo_res res = {
6363 .flavors = flavors,
6364 };
6365 struct rpc_message msg = {
6366 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6367 .rpc_argp = &args,
6368 .rpc_resp = &res,
6369 };
6370 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6371 }
6372
6373 static int
6374 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6375 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6376 {
6377 struct nfs4_exception exception = { };
6378 int err;
6379 do {
6380 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6381 switch (err) {
6382 case 0:
6383 case -NFS4ERR_WRONGSEC:
6384 case -ENOTSUPP:
6385 goto out;
6386 default:
6387 err = nfs4_handle_exception(server, err, &exception);
6388 }
6389 } while (exception.retry);
6390 out:
6391 return err;
6392 }
6393
6394 static int
6395 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6396 struct nfs_fsinfo *info)
6397 {
6398 int err;
6399 struct page *page;
6400 rpc_authflavor_t flavor;
6401 struct nfs4_secinfo_flavors *flavors;
6402
6403 page = alloc_page(GFP_KERNEL);
6404 if (!page) {
6405 err = -ENOMEM;
6406 goto out;
6407 }
6408
6409 flavors = page_address(page);
6410 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6411
6412 /*
6413 * Fall back on "guess and check" method if
6414 * the server doesn't support SECINFO_NO_NAME
6415 */
6416 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
6417 err = nfs4_find_root_sec(server, fhandle, info);
6418 goto out_freepage;
6419 }
6420 if (err)
6421 goto out_freepage;
6422
6423 flavor = nfs_find_best_sec(flavors);
6424 if (err == 0)
6425 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6426
6427 out_freepage:
6428 put_page(page);
6429 if (err == -EACCES)
6430 return -EPERM;
6431 out:
6432 return err;
6433 }
6434
6435 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6436 {
6437 int status;
6438 struct nfs41_test_stateid_args args = {
6439 .stateid = stateid,
6440 };
6441 struct nfs41_test_stateid_res res;
6442 struct rpc_message msg = {
6443 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6444 .rpc_argp = &args,
6445 .rpc_resp = &res,
6446 };
6447
6448 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6449 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6450
6451 if (status == NFS_OK)
6452 return res.status;
6453 return status;
6454 }
6455
6456 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6457 {
6458 struct nfs4_exception exception = { };
6459 int err;
6460 do {
6461 err = nfs4_handle_exception(server,
6462 _nfs41_test_stateid(server, stateid),
6463 &exception);
6464 } while (exception.retry);
6465 return err;
6466 }
6467
6468 static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6469 {
6470 struct nfs41_free_stateid_args args = {
6471 .stateid = stateid,
6472 };
6473 struct nfs41_free_stateid_res res;
6474 struct rpc_message msg = {
6475 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6476 .rpc_argp = &args,
6477 .rpc_resp = &res,
6478 };
6479
6480 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6481 return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6482 }
6483
6484 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6485 {
6486 struct nfs4_exception exception = { };
6487 int err;
6488 do {
6489 err = nfs4_handle_exception(server,
6490 _nfs4_free_stateid(server, stateid),
6491 &exception);
6492 } while (exception.retry);
6493 return err;
6494 }
6495
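/*
 * NFSv4.1 stateid comparison: the "other" fields must match, and a
 * seqid of zero on either side acts as a wildcard (for example, a
 * current stateid with seqid 0 matches any seqid for the same "other"
 * value).
 */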
6496 static bool nfs41_match_stateid(const nfs4_stateid *s1,
6497 const nfs4_stateid *s2)
6498 {
6499 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
6500 return false;
6501
6502 if (s1->seqid == s2->seqid)
6503 return true;
6504 if (s1->seqid == 0 || s2->seqid == 0)
6505 return true;
6506
6507 return false;
6508 }
6509
6510 #endif /* CONFIG_NFS_V4_1 */
6511
6512 static bool nfs4_match_stateid(const nfs4_stateid *s1,
6513 const nfs4_stateid *s2)
6514 {
6515 return nfs4_stateid_match(s1, s2);
6516 }
6517
6518
6519 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
6520 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6521 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6522 .recover_open = nfs4_open_reclaim,
6523 .recover_lock = nfs4_lock_reclaim,
6524 .establish_clid = nfs4_init_clientid,
6525 .get_clid_cred = nfs4_get_setclientid_cred,
6526 };
6527
6528 #if defined(CONFIG_NFS_V4_1)
6529 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
6530 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6531 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6532 .recover_open = nfs4_open_reclaim,
6533 .recover_lock = nfs4_lock_reclaim,
6534 .establish_clid = nfs41_init_clientid,
6535 .get_clid_cred = nfs4_get_exchange_id_cred,
6536 .reclaim_complete = nfs41_proc_reclaim_complete,
6537 };
6538 #endif /* CONFIG_NFS_V4_1 */
6539
6540 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
6541 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6542 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6543 .recover_open = nfs4_open_expired,
6544 .recover_lock = nfs4_lock_expired,
6545 .establish_clid = nfs4_init_clientid,
6546 .get_clid_cred = nfs4_get_setclientid_cred,
6547 };
6548
6549 #if defined(CONFIG_NFS_V4_1)
6550 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
6551 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6552 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6553 .recover_open = nfs41_open_expired,
6554 .recover_lock = nfs41_lock_expired,
6555 .establish_clid = nfs41_init_clientid,
6556 .get_clid_cred = nfs4_get_exchange_id_cred,
6557 };
6558 #endif /* CONFIG_NFS_V4_1 */
6559
6560 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
6561 .sched_state_renewal = nfs4_proc_async_renew,
6562 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
6563 .renew_lease = nfs4_proc_renew,
6564 };
6565
6566 #if defined(CONFIG_NFS_V4_1)
6567 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
6568 .sched_state_renewal = nfs41_proc_async_sequence,
6569 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
6570 .renew_lease = nfs4_proc_sequence,
6571 };
6572 #endif
6573
6574 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
6575 .minor_version = 0,
6576 .call_sync = _nfs4_call_sync,
6577 .match_stateid = nfs4_match_stateid,
6578 .find_root_sec = nfs4_find_root_sec,
6579 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
6580 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
6581 .state_renewal_ops = &nfs40_state_renewal_ops,
6582 };
6583
6584 #if defined(CONFIG_NFS_V4_1)
6585 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
6586 .minor_version = 1,
6587 .call_sync = _nfs4_call_sync_session,
6588 .match_stateid = nfs41_match_stateid,
6589 .find_root_sec = nfs41_find_root_sec,
6590 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
6591 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
6592 .state_renewal_ops = &nfs41_state_renewal_ops,
6593 };
6594 #endif
6595
6596 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
6597 [0] = &nfs_v4_0_minor_ops,
6598 #if defined(CONFIG_NFS_V4_1)
6599 [1] = &nfs_v4_1_minor_ops,
6600 #endif
6601 };
6602
6603 static const struct inode_operations nfs4_file_inode_operations = {
6604 .permission = nfs_permission,
6605 .getattr = nfs_getattr,
6606 .setattr = nfs_setattr,
6607 .getxattr = generic_getxattr,
6608 .setxattr = generic_setxattr,
6609 .listxattr = generic_listxattr,
6610 .removexattr = generic_removexattr,
6611 };
6612
6613 const struct nfs_rpc_ops nfs_v4_clientops = {
6614 .version = 4, /* protocol version */
6615 .dentry_ops = &nfs4_dentry_operations,
6616 .dir_inode_ops = &nfs4_dir_inode_operations,
6617 .file_inode_ops = &nfs4_file_inode_operations,
6618 .file_ops = &nfs4_file_operations,
6619 .getroot = nfs4_proc_get_root,
6620 .getattr = nfs4_proc_getattr,
6621 .setattr = nfs4_proc_setattr,
6622 .lookup = nfs4_proc_lookup,
6623 .access = nfs4_proc_access,
6624 .readlink = nfs4_proc_readlink,
6625 .create = nfs4_proc_create,
6626 .remove = nfs4_proc_remove,
6627 .unlink_setup = nfs4_proc_unlink_setup,
6628 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
6629 .unlink_done = nfs4_proc_unlink_done,
6630 .rename = nfs4_proc_rename,
6631 .rename_setup = nfs4_proc_rename_setup,
6632 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
6633 .rename_done = nfs4_proc_rename_done,
6634 .link = nfs4_proc_link,
6635 .symlink = nfs4_proc_symlink,
6636 .mkdir = nfs4_proc_mkdir,
6637 .rmdir = nfs4_proc_remove,
6638 .readdir = nfs4_proc_readdir,
6639 .mknod = nfs4_proc_mknod,
6640 .statfs = nfs4_proc_statfs,
6641 .fsinfo = nfs4_proc_fsinfo,
6642 .pathconf = nfs4_proc_pathconf,
6643 .set_capabilities = nfs4_server_capabilities,
6644 .decode_dirent = nfs4_decode_dirent,
6645 .read_setup = nfs4_proc_read_setup,
6646 .read_rpc_prepare = nfs4_proc_read_rpc_prepare,
6647 .read_done = nfs4_read_done,
6648 .write_setup = nfs4_proc_write_setup,
6649 .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
6650 .write_done = nfs4_write_done,
6651 .commit_setup = nfs4_proc_commit_setup,
6652 .commit_done = nfs4_commit_done,
6653 .lock = nfs4_proc_lock,
6654 .clear_acl_cache = nfs4_zap_acl_attr,
6655 .close_context = nfs4_close_context,
6656 .open_context = nfs4_atomic_open,
6657 .init_client = nfs4_init_client,
6658 .secinfo = nfs4_proc_secinfo,
6659 };
6660
6661 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
6662 .prefix = XATTR_NAME_NFSV4_ACL,
6663 .list = nfs4_xattr_list_nfs4_acl,
6664 .get = nfs4_xattr_get_nfs4_acl,
6665 .set = nfs4_xattr_set_nfs4_acl,
6666 };
6667
6668 const struct xattr_handler *nfs4_xattr_handlers[] = {
6669 &nfs4_xattr_nfs4_acl_handler,
6670 NULL
6671 };
6672
6673 module_param(max_session_slots, ushort, 0644);
6674 MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
6675 "requests the client will negotiate");
6676
6677 /*
6678 * Local variables:
6679 * c-basic-offset: 8
6680 * End:
6681 */
6682