/*
 * fs/nfs/nfs4state.c
 *
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model. For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"

#define OPENOWNER_POOL_SIZE 8

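/*
 * The "all zeroes" special stateid defined by RFC 3530. Kept as a
 * single const global so it can be compared against and copied from
 * without any locking.
 */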
const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);

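/*
 * nfs4_init_clientid - establish the NFSv4.0 lease for an nfs_client
 *
 * Runs SETCLIENTID and SETCLIENTID_CONFIRM, registering the client's
 * callback port, then schedules lease renewal. If an earlier attempt
 * left NFS4CLNT_LEASE_CONFIRM set, only the confirm step is retried.
 */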
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_setclientid_res clid = {
		.clientid = clp->cl_clientid,
		.confirm = clp->cl_confirm,
	};
	unsigned short port;
	int status;

	if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
		goto do_confirm;
	port = nfs_callback_tcpport;
	if (clp->cl_addr.ss_family == AF_INET6)
		port = nfs_callback_tcpport6;

	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
	if (status != 0)
		goto out;
	clp->cl_clientid = clid.clientid;
	clp->cl_confirm = clid.confirm;
	set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
	status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
	if (status != 0)
		goto out;
	clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
	nfs4_schedule_state_renewal(clp);
out:
	return status;
}

struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
	struct rpc_cred *cred = NULL;

	if (clp->cl_machine_cred != NULL)
		cred = get_rpccred(clp->cl_machine_cred);
	return cred;
}

static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = clp->cl_machine_cred;
	clp->cl_machine_cred = NULL;
	spin_unlock(&clp->cl_lock);
	if (cred != NULL)
		put_rpccred(cred);
}

static struct rpc_cred *
nfs4_get_renew_cred_server_locked(struct nfs_server *server)
{
	struct rpc_cred *cred = NULL;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;

	for (pos = rb_first(&server->state_owners);
	     pos != NULL;
	     pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}

/**
 * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
 * @clp: client state handle
 *
 * Returns an rpc_cred with reference count bumped, or NULL.
 * Caller must hold clp->cl_lock.
 */
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
	struct rpc_cred *cred = NULL;
	struct nfs_server *server;

	/* Use machine credentials if available */
	cred = nfs4_get_machine_cred_locked(clp);
	if (cred != NULL)
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		cred = nfs4_get_renew_cred_server_locked(server);
		if (cred != NULL)
			break;
	}
	rcu_read_unlock();

out:
	return cred;
}

#if defined(CONFIG_NFS_V4_1)

static int nfs41_setup_state_renewal(struct nfs_client *clp)
{
	int status;
	struct nfs_fsinfo fsinfo;

	if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
		nfs4_schedule_state_renewal(clp);
		return 0;
	}

	status = nfs4_proc_get_lease_time(clp, &fsinfo);
	if (status == 0) {
		/* Update lease time and schedule renewal */
		spin_lock(&clp->cl_lock);
		clp->cl_lease_time = fsinfo.lease_time * HZ;
		clp->cl_last_renewal = jiffies;
		spin_unlock(&clp->cl_lock);

		nfs4_schedule_state_renewal(clp);
	}

	return status;
}

/*
 * The back channel returns NFS4ERR_DELAY for new requests while
 * NFS4_SESSION_DRAINING is set, so there is no back-channel work
 * left to do once draining completes.
 */
static void nfs4_end_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;
	struct nfs4_slot_table *tbl;
	int max_slots;

	if (ses == NULL)
		return;
	tbl = &ses->fc_slot_table;
	if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		spin_lock(&tbl->slot_tbl_lock);
		max_slots = tbl->max_slots;
		while (max_slots--) {
			if (rpc_wake_up_first(&tbl->slot_tbl_waitq,
					      nfs4_set_task_privileged,
					      NULL) == NULL)
				break;
		}
		spin_unlock(&tbl->slot_tbl_lock);
	}
}

static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
{
	spin_lock(&tbl->slot_tbl_lock);
	if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
		INIT_COMPLETION(tbl->complete);
		spin_unlock(&tbl->slot_tbl_lock);
		return wait_for_completion_interruptible(&tbl->complete);
	}
	spin_unlock(&tbl->slot_tbl_lock);
	return 0;
}

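/*
 * Mark the session as draining, then wait for the back- and
 * fore-channel slot tables to go idle (highest_used_slotid ==
 * NFS4_NO_SLOT) before recovery operations are allowed to proceed.
 */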
static int nfs4_begin_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;
	int ret = 0;

	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
	/* back channel */
	ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
	if (ret)
		return ret;
	/* fore channel */
	return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
}

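/*
 * nfs41_init_clientid - NFSv4.1 counterpart of nfs4_init_clientid
 *
 * Drains the session, establishes the client ID with EXCHANGE_ID and
 * confirms it with CREATE_SESSION, then sets up lease renewal and
 * marks the client ready.
 */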
int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
	int status;

	if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
		goto do_confirm;
	nfs4_begin_drain_session(clp);
	status = nfs4_proc_exchange_id(clp, cred);
	if (status != 0)
		goto out;
	set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
	status = nfs4_proc_create_session(clp);
	if (status != 0)
		goto out;
	clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
	nfs41_setup_state_renewal(clp);
	nfs_mark_client_ready(clp, NFS_CS_READY);
out:
	return status;
}

struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	return cred;
}

#endif /* CONFIG_NFS_V4_1 */

static struct rpc_cred *
nfs4_get_setclientid_cred_server(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct rpc_cred *cred = NULL;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;

	spin_lock(&clp->cl_lock);
	pos = rb_first(&server->state_owners);
	if (pos != NULL) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
		cred = get_rpccred(sp->so_cred);
	}
	spin_unlock(&clp->cl_lock);
	return cred;
}

/**
 * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
 * @clp: client state handle
 *
 * Returns an rpc_cred with reference count bumped, or NULL.
 */
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
	struct nfs_server *server;
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	if (cred != NULL)
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		cred = nfs4_get_setclientid_cred_server(server);
		if (cred != NULL)
			break;
	}
	rcu_read_unlock();

out:
	return cred;
}

static struct nfs4_state_owner *
nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
{
	struct rb_node **p = &server->state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);

		if (cred < sp->so_cred)
			p = &parent->rb_left;
		else if (cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			if (!list_empty(&sp->so_lru))
				list_del_init(&sp->so_lru);
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	return NULL;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
{
	struct nfs_server *server = new->so_server;
	struct rb_node **p = &server->state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;
	int err;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);

		if (new->so_cred < sp->so_cred)
			p = &parent->rb_left;
		else if (new->so_cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			if (!list_empty(&sp->so_lru))
				list_del_init(&sp->so_lru);
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id);
	if (err)
		return ERR_PTR(err);
	rb_link_node(&new->so_server_node, parent, p);
	rb_insert_color(&new->so_server_node, &server->state_owners);
	return new;
}

static void
nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
{
	struct nfs_server *server = sp->so_server;

	if (!RB_EMPTY_NODE(&sp->so_server_node))
		rb_erase(&sp->so_server_node, &server->state_owners);
	ida_remove(&server->openowner_id, sp->so_seqid.owner_id);
}

static void
nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
{
	sc->create_time = ktime_get();
	sc->flags = 0;
	sc->counter = 0;
	spin_lock_init(&sc->lock);
	INIT_LIST_HEAD(&sc->list);
	rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue");
}

static void
nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc)
{
	rpc_destroy_wait_queue(&sc->wait);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(struct nfs_server *server,
		       struct rpc_cred *cred,
		       gfp_t gfp_flags)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), gfp_flags);
	if (!sp)
		return NULL;
	sp->so_server = server;
	sp->so_cred = get_rpccred(cred);
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	nfs4_init_seqid_counter(&sp->so_seqid);
	atomic_set(&sp->so_count, 1);
	INIT_LIST_HEAD(&sp->so_lru);
	return sp;
}

static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct rb_node *rb_node = &sp->so_server_node;

	if (!RB_EMPTY_NODE(rb_node)) {
		struct nfs_server *server = sp->so_server;
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		if (!RB_EMPTY_NODE(rb_node)) {
			rb_erase(rb_node, &server->state_owners);
			RB_CLEAR_NODE(rb_node);
		}
		spin_unlock(&clp->cl_lock);
	}
}

static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
	nfs4_destroy_seqid_counter(&sp->so_seqid);
	put_rpccred(sp->so_cred);
	kfree(sp);
}

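/*
 * Trim the LRU of unused state owners: anything that has sat on the
 * list for longer than one lease period is unhashed and freed. The
 * LRU is oldest-first, so the scan can stop at the first entry that
 * is still within the lease window.
 */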
static void nfs4_gc_state_owners(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *tmp;
	unsigned long time_min, time_max;
	LIST_HEAD(doomed);

	spin_lock(&clp->cl_lock);
	time_max = jiffies;
	time_min = (long)time_max - (long)clp->cl_lease_time;
	list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
		/* NB: LRU is sorted so that oldest is at the head */
		if (time_in_range(sp->so_expires, time_min, time_max))
			break;
		list_move(&sp->so_lru, &doomed);
		nfs4_remove_state_owner_locked(sp);
	}
	spin_unlock(&clp->cl_lock);

	list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
		list_del(&sp->so_lru);
		nfs4_free_state_owner(sp);
	}
}

/**
 * nfs4_get_state_owner - Look up a state owner given a credential
 * @server: nfs_server to search
 * @cred: RPC credential to match
 *
 * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
					      struct rpc_cred *cred,
					      gfp_t gfp_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner_locked(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		goto out;
	new = nfs4_alloc_state_owner(server, cred, gfp_flags);
	if (new == NULL)
		goto out;
	do {
		if (ida_pre_get(&server->openowner_id, gfp_flags) == 0)
			break;
		spin_lock(&clp->cl_lock);
		sp = nfs4_insert_state_owner_locked(new);
		spin_unlock(&clp->cl_lock);
	} while (sp == ERR_PTR(-EAGAIN));
	if (sp != new)
		nfs4_free_state_owner(new);
out:
	nfs4_gc_state_owners(server);
	return sp;
}

/**
 * nfs4_put_state_owner - Release a nfs4_state_owner
 * @sp: state owner data to release
 *
 * Note that we keep released state owners on an LRU list.
 * This caches valid state owners so that they can be
 * reused, to avoid the OPEN_CONFIRM on minor version 0.
 * It also pins the uniquifier of dropped state owners for
 * a while, to ensure that those state owner names are
 * never reused.
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_server *server = sp->so_server;
	struct nfs_client *clp = server->nfs_client;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;

	sp->so_expires = jiffies;
	list_add_tail(&sp->so_lru, &server->state_owners_lru);
	spin_unlock(&clp->cl_lock);
}

/**
 * nfs4_purge_state_owners - Release all cached state owners
 * @server: nfs_server with cached state owners to release
 *
 * Called at umount time. Remaining state owners will be on
 * the LRU with ref count of zero.
 */
void nfs4_purge_state_owners(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *tmp;
	LIST_HEAD(doomed);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
		list_move(&sp->so_lru, &doomed);
		nfs4_remove_state_owner_locked(sp);
	}
	spin_unlock(&clp->cl_lock);

	list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
		list_del(&sp->so_lru);
		nfs4_free_state_owner(sp);
	}
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	seqlock_init(&state->seqlock);
	return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
	if (state->state == fmode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (fmode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	state->state = fmode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner != owner)
			continue;
		if (atomic_inc_not_zero(&state->count))
			return state;
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		ihold(inode);
		state->inode = inode;
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
static void __nfs4_close(struct nfs4_state *state,
			 fmode_t fmode, gfp_t gfp_mask, int wait)
{
	struct nfs4_state_owner *owner = state->owner;
	int call_close = 0;
	fmode_t newstate;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		state->n_rdonly--;
		break;
	case FMODE_WRITE:
		state->n_wronly--;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr--;
	}
	newstate = FMODE_READ|FMODE_WRITE;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			newstate &= ~FMODE_READ;
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (state->n_wronly == 0) {
			newstate &= ~FMODE_WRITE;
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (newstate == 0)
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	nfs4_state_set_mode_locked(state, newstate);
	spin_unlock(&owner->so_lock);

	if (!call_close) {
		nfs4_put_open_state(state);
		nfs4_put_state_owner(owner);
	} else {
		bool roc = pnfs_roc(state->inode);

		nfs4_do_close(state, gfp_mask, wait, roc);
	}
}

void nfs4_close_state(struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(state, fmode, GFP_NOFS, 0);
}

void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(state, fmode, GFP_KERNEL, 1);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type)
			continue;
		switch (pos->ls_owner.lo_type) {
		case NFS4_POSIX_LOCK_TYPE:
			if (pos->ls_owner.lo_u.posix_owner != fl_owner)
				continue;
			break;
		case NFS4_FLOCK_LOCK_TYPE:
			if (pos->ls_owner.lo_u.flock_owner != fl_pid)
				continue;
		}
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate and initialize a new lock_state structure for the given
 * owner. Returns NULL on allocation failure or if no lock-owner id
 * could be obtained.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
	struct nfs4_lock_state *lsp;
	struct nfs_server *server = state->owner->so_server;

	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
	if (lsp == NULL)
		return NULL;
	nfs4_init_seqid_counter(&lsp->ls_seqid);
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_state = state;
	lsp->ls_owner.lo_type = type;
	switch (lsp->ls_owner.lo_type) {
	case NFS4_FLOCK_LOCK_TYPE:
		lsp->ls_owner.lo_u.flock_owner = fl_pid;
		break;
	case NFS4_POSIX_LOCK_TYPE:
		lsp->ls_owner.lo_u.posix_owner = fl_owner;
		break;
	default:
		goto out_free;
	}
	lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
	if (lsp->ls_seqid.owner_id < 0)
		goto out_free;
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
out_free:
	kfree(lsp);
	return NULL;
}

void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
	ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
	nfs4_destroy_seqid_counter(&lsp->ls_seqid);
	kfree(lsp);
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner, pid, type);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner, pid, type);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	if (new != NULL)
		nfs4_free_lock_state(state->owner->so_server, new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
		if (nfs4_release_lockowner(lsp) == 0)
			return;
	}
	nfs4_free_lock_state(lsp->ls_state->owner->so_server, lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static const struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	if (fl->fl_flags & FL_POSIX)
		lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
	else if (fl->fl_flags & FL_FLOCK)
		lsp = nfs4_get_lock_state(state, NULL, fl->fl_pid,
					  NFS4_FLOCK_LOCK_TYPE);
	else
		return -EINVAL;
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

static bool nfs4_copy_lock_stateid(nfs4_stateid *dst, struct nfs4_state *state,
				   fl_owner_t fl_owner, pid_t fl_pid)
{
	struct nfs4_lock_state *lsp;
	bool ret = false;

	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		goto out;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
		nfs4_stateid_copy(dst, &lsp->ls_stateid);
		ret = true;
	}
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
out:
	return ret;
}

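/*
 * Copy the open stateid under the seqlock, retrying if a concurrent
 * update (for example from state recovery) races with the read, so
 * that a half-written stateid is never returned.
 */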
static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
	int seq;

	do {
		seq = read_seqbegin(&state->seqlock);
		nfs4_stateid_copy(dst, &state->stateid);
	} while (read_seqretry(&state->seqlock, seq));
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
			    fmode_t fmode, fl_owner_t fl_owner, pid_t fl_pid)
{
	if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
		return;
	if (nfs4_copy_lock_stateid(dst, state, fl_owner, fl_pid))
		return;
	nfs4_copy_open_stateid(dst, state);
}

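/*
 * nfs_seqid objects serialize operations that mutate an owner's seqid
 * (OPEN, CLOSE, LOCK, ...): waiters queue on the counter's list, and
 * only the entry at the head may proceed; see nfs_wait_on_sequence().
 */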
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
{
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), gfp_mask);
	if (new != NULL) {
		new->sequence = counter;
		INIT_LIST_HEAD(&new->list);
		new->task = NULL;
	}
	return new;
}

void nfs_release_seqid(struct nfs_seqid *seqid)
{
	struct nfs_seqid_counter *sequence;

	if (list_empty(&seqid->list))
		return;
	sequence = seqid->sequence;
	spin_lock(&sequence->lock);
	list_del_init(&seqid->list);
	if (!list_empty(&sequence->list)) {
		struct nfs_seqid *next;

		next = list_first_entry(&sequence->list,
					struct nfs_seqid, list);
		rpc_wake_up_queued_task(&sequence->wait, next->task);
	}
	spin_unlock(&sequence->lock);
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	nfs_release_seqid(seqid);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	BUG_ON(list_first_entry(&seqid->sequence->list, struct nfs_seqid, list) != seqid);
	switch (status) {
	case 0:
		break;
	case -NFS4ERR_BAD_SEQID:
		if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
			return;
		pr_warn_ratelimited("NFS: v4 server returned a bad"
				" sequence-id error on an"
				" unconfirmed sequence %p!\n",
				seqid->sequence);
		/* Fall through - the seqid is not mutated in this case either */
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_BADXDR:
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_NOFILEHANDLE:
		/* Non-seqid mutating errors */
		return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	struct nfs4_state_owner *sp = container_of(seqid->sequence,
					struct nfs4_state_owner, so_seqid);
	struct nfs_server *server = sp->so_server;

	if (status == -NFS4ERR_BAD_SEQID)
		nfs4_drop_state_owner(sp);
	if (!nfs4_has_session(server->nfs_client))
		nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct nfs_seqid_counter *sequence = seqid->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	seqid->task = task;
	if (list_empty(&seqid->list))
		list_add_tail(&seqid->list, &sequence->list);
	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
		goto unlock;
	rpc_sleep_on(&sequence->wait, task, NULL);
	status = -EAGAIN;
unlock:
	spin_unlock(&sequence->lock);
	return status;
}

static int nfs4_run_state_manager(void *);

static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule the nfs_client asynchronous state management routine
 */
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
	struct task_struct *task;
	char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];

	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
		return;
	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);

	/* The rcu_read_lock() is not strictly necessary, as the state
	 * manager is the only thread that ever changes the rpc_xprt
	 * after it's initialized. At this point, we're single threaded. */
	rcu_read_lock();
	snprintf(buf, sizeof(buf), "%s-manager",
			rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
	rcu_read_unlock();
	task = kthread_run(nfs4_run_state_manager, clp, buf);
	if (IS_ERR(task)) {
		printk(KERN_ERR "%s: kthread_run: %ld\n",
			__func__, PTR_ERR(task));
		nfs4_clear_state_manager_bit(clp);
		nfs_put_client(clp);
		module_put(THIS_MODULE);
	}
}

/*
 * Schedule a lease recovery attempt
 */
void nfs4_schedule_lease_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
	nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);

/**
 * nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
 * @clp: client to process
 *
 * Set the NFS4CLNT_LEASE_EXPIRED state in order to force a
 * resend of the SETCLIENTID and hence re-establish the
 * callback channel. Then return all existing delegations.
 */
static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
{
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	nfs_expire_all_delegations(clp);
}

void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
{
	nfs40_handle_cb_pathdown(clp);
	nfs4_schedule_state_manager(clp);
}

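/*
 * Flag a state for reclaim under the server's grace period after a
 * reboot. Returns 1 if the state is eligible, or 0 if it had already
 * been marked as expired (NOGRACE) and so must not be reclaimed.
 */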
static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	/* Don't recover state that expired before the reboot */
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
		clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
		return 0;
	}
	set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
	return 1;
}

static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
	return 1;
}

void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
{
	struct nfs_client *clp = server->nfs_client;

	nfs4_state_mark_reclaim_nograce(clp, state);
	nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);

void nfs_inode_find_state_and_recover(struct inode *inode,
		const nfs4_stateid *stateid)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	bool found = false;

	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (!nfs4_stateid_match(&state->stateid, stateid))
			continue;
		nfs4_state_mark_reclaim_nograce(clp, state);
		found = true;
	}
	spin_unlock(&inode->i_lock);
	if (found)
		nfs4_schedule_state_manager(clp);
}

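/*
 * Re-establish the byte-range locks recorded on this open state. The
 * flock lock is dropped around each recover_lock() call; per-lock
 * failures are logged and squashed, while session- and lease-level
 * errors abort the scan and are returned to the caller.
 */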
static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
	struct inode *inode = state->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct file_lock *fl;
	int status = 0;

	if (inode->i_flock == NULL)
		return 0;

	/* Guard against delegation returns and new lock/unlock calls */
	down_write(&nfsi->rwsem);
	/* Protect inode->i_flock using the BKL */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file)->state != state)
			continue;
		unlock_flocks();
		status = ops->recover_lock(state, fl);
		switch (status) {
		case 0:
			break;
		case -ESTALE:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
			goto out;
		default:
			printk(KERN_ERR "NFS: %s: unhandled error %d. "
					"Zeroing state\n", __func__, status);
			/* Fall through */
		case -ENOMEM:
		case -NFS4ERR_DENIED:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			status = 0;
		}
		lock_flocks();
	}
	unlock_flocks();
out:
	up_write(&nfsi->rwsem);
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
restart:
	spin_lock(&sp->so_lock);
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
			continue;
		if (state->state == 0)
			continue;
		atomic_inc(&state->count);
		spin_unlock(&sp->so_lock);
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(state, ops);
			if (status >= 0) {
				spin_lock(&state->state_lock);
				list_for_each_entry(lock, &state->lock_states, ls_locks) {
					if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
						pr_warn_ratelimited("NFS: "
							"%s: Lock reclaim "
							"failed!\n", __func__);
				}
				spin_unlock(&state->state_lock);
				nfs4_put_open_state(state);
				goto restart;
			}
		}
		switch (status) {
		default:
			printk(KERN_ERR "NFS: %s: unhandled error %d. "
					"Zeroing state\n", __func__, status);
			/* Fall through */
		case -ENOENT:
		case -ENOMEM:
		case -ESTALE:
			/*
			 * Open state on this file cannot be recovered
			 * All we can do is revert to using the zero stateid.
			 */
			memset(&state->stateid, 0,
				sizeof(state->stateid));
			/* Mark the file as being 'closed' */
			state->state = 0;
			break;
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
			break;
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
			nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
			/* Fall through */
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
			goto out_err;
		}
		nfs4_put_open_state(state);
		goto restart;
	}
	spin_unlock(&sp->so_lock);
	return 0;
out_err:
	nfs4_put_open_state(state);
	return status;
}

static void nfs4_clear_open_state(struct nfs4_state *state)
{
	struct nfs4_lock_state *lock;

	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	spin_lock(&state->state_lock);
	list_for_each_entry(lock, &state->lock_states, ls_locks) {
		lock->ls_seqid.flags = 0;
		lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
	}
	spin_unlock(&state->state_lock);
}

static void nfs4_reset_seqids(struct nfs_server *server,
		int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	spin_lock(&clp->cl_lock);
	for (pos = rb_first(&server->state_owners);
	     pos != NULL;
	     pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (mark_reclaim(clp, state))
				nfs4_clear_open_state(state);
		}
		spin_unlock(&sp->so_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
		int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs4_reset_seqids(server, mark_reclaim);
	rcu_read_unlock();
}

static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}

static void nfs4_reclaim_complete(struct nfs_client *clp,
		const struct nfs4_state_recovery_ops *ops)
{
	/* Notify the server we're done reclaiming our state */
	if (ops->reclaim_complete)
		(void)ops->reclaim_complete(clp);
}

static void nfs4_clear_reclaim_server(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	spin_lock(&clp->cl_lock);
	for (pos = rb_first(&server->state_owners);
	     pos != NULL;
	     pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
						&state->flags))
				continue;
			nfs4_state_mark_reclaim_nograce(clp, state);
		}
		spin_unlock(&sp->so_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
{
	struct nfs_server *server;

	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs4_clear_reclaim_server(server);
	rcu_read_unlock();

	nfs_delegation_reap_unclaimed(clp);
	return 1;
}

static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
	if (!nfs4_state_clear_reclaim_reboot(clp))
		return;
	nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
}

static void nfs_delegation_clear_all(struct nfs_client *clp)
{
	nfs_delegation_mark_reclaim(clp);
	nfs_delegation_reap_unclaimed(clp);
}

static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
{
	nfs_delegation_clear_all(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}

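/*
 * Translate an error returned by a recovery operation into the next
 * recovery action, by setting the appropriate flags in cl_state.
 * Returns 0 if the error was handled here, otherwise the error itself.
 */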
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
	switch (error) {
	case 0:
		break;
	case -NFS4ERR_CB_PATH_DOWN:
		nfs40_handle_cb_pathdown(clp);
		break;
	case -NFS4ERR_NO_GRACE:
		nfs4_state_end_reclaim_reboot(clp);
		break;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_LEASE_MOVED:
		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
		nfs4_state_clear_reclaim_reboot(clp);
		nfs4_state_start_reclaim_reboot(clp);
		break;
	case -NFS4ERR_EXPIRED:
		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
		nfs4_state_start_reclaim_nograce(clp);
		break;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
		/* Zero session reset errors */
		break;
	default:
		return error;
	}
	return 0;
}

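/*
 * Walk every state owner on every server attached to this client and
 * run the supplied recovery ops on those flagged for recovery. The
 * rb-tree walk restarts from scratch after each owner, since all
 * locks are dropped while the recovery RPCs run.
 */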
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
	struct nfs4_state_owner *sp;
	struct nfs_server *server;
	struct rb_node *pos;
	int status = 0;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		nfs4_purge_state_owners(server);
		spin_lock(&clp->cl_lock);
		for (pos = rb_first(&server->state_owners);
		     pos != NULL;
		     pos = rb_next(pos)) {
			sp = rb_entry(pos,
				struct nfs4_state_owner, so_server_node);
			if (!test_and_clear_bit(ops->owner_flag_bit,
							&sp->so_flags))
				continue;
			atomic_inc(&sp->so_count);
			spin_unlock(&clp->cl_lock);
			rcu_read_unlock();

			status = nfs4_reclaim_open_state(sp, ops);
			if (status < 0) {
				set_bit(ops->owner_flag_bit, &sp->so_flags);
				nfs4_put_state_owner(sp);
				return nfs4_recovery_handle_error(clp, status);
			}

			nfs4_put_state_owner(sp);
			goto restart;
		}
		spin_unlock(&clp->cl_lock);
	}
	rcu_read_unlock();
	return status;
}

static int nfs4_check_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred;
	const struct nfs4_state_maintenance_ops *ops =
		clp->cl_mvops->state_renewal_ops;
	int status;

	/* Is the client already known to have an expired lease? */
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		return 0;
	spin_lock(&clp->cl_lock);
	cred = ops->get_state_renewal_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	if (cred == NULL) {
		cred = nfs4_get_setclientid_cred(clp);
		status = -ENOKEY;
		if (cred == NULL)
			goto out;
	}
	status = ops->renew_lease(clp, cred);
	put_rpccred(cred);
out:
	return nfs4_recovery_handle_error(clp, status);
}

static int nfs4_reclaim_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred;
	const struct nfs4_state_recovery_ops *ops =
		clp->cl_mvops->reboot_recovery_ops;
	int status = -ENOENT;

	cred = ops->get_clid_cred(clp);
	if (cred != NULL) {
		status = ops->establish_clid(clp, cred);
		put_rpccred(cred);
		/* Handle case where the user hasn't set up machine creds */
		if (status == -EACCES && cred == clp->cl_machine_cred) {
			nfs4_clear_machine_cred(clp);
			status = -EAGAIN;
		}
		if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
			status = -EPROTONOSUPPORT;
	}
	return status;
}

#ifdef CONFIG_NFS_V4_1
void nfs4_schedule_session_recovery(struct nfs4_session *session)
{
	struct nfs_client *clp = session->clp;

	set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
	nfs4_schedule_lease_recovery(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);

void nfs41_handle_recall_slot(struct nfs_client *clp)
{
	set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
	nfs4_schedule_state_manager(clp);
}

static void nfs4_reset_all_state(struct nfs_client *clp)
{
	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
		clp->cl_boot_time = CURRENT_TIME;
		nfs4_state_start_reclaim_nograce(clp);
		nfs4_schedule_state_manager(clp);
	}
}

static void nfs41_handle_server_reboot(struct nfs_client *clp)
{
	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
		nfs4_state_start_reclaim_reboot(clp);
		nfs4_schedule_state_manager(clp);
	}
}

static void nfs41_handle_state_revoked(struct nfs_client *clp)
{
	/* Temporary */
	nfs4_reset_all_state(clp);
}

static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
	/* This will need to handle layouts too */
	nfs_expire_all_delegations(clp);
}

static void nfs41_handle_cb_path_down(struct nfs_client *clp)
{
	nfs_expire_all_delegations(clp);
	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
		nfs4_schedule_state_manager(clp);
}

void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
	if (!flags)
		return;
	if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
		nfs41_handle_server_reboot(clp);
	if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
			    SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
			    SEQ4_STATUS_ADMIN_STATE_REVOKED |
			    SEQ4_STATUS_LEASE_MOVED))
		nfs41_handle_state_revoked(clp);
	if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
		nfs41_handle_recallable_state_revoked(clp);
	if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
			    SEQ4_STATUS_BACKCHANNEL_FAULT |
			    SEQ4_STATUS_CB_PATH_DOWN_SESSION))
		nfs41_handle_cb_path_down(clp);
}

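/*
 * Drain the session, then destroy and recreate it. DELAY and
 * BACK_CHAN_BUSY replies to DESTROY_SESSION leave NFS4CLNT_SESSION_RESET
 * set so that the state manager retries on a later pass.
 */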
static int nfs4_reset_session(struct nfs_client *clp)
{
	int status;

	nfs4_begin_drain_session(clp);
	status = nfs4_proc_destroy_session(clp->cl_session);
	switch (status) {
	case 0:
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
		break;
	case -NFS4ERR_BACK_CHAN_BUSY:
	case -NFS4ERR_DELAY:
		set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
		status = 0;
		ssleep(1);
		goto out;
	default:
		status = nfs4_recovery_handle_error(clp, status);
		goto out;
	}

	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
	status = nfs4_proc_create_session(clp);
	if (status) {
		status = nfs4_recovery_handle_error(clp, status);
		goto out;
	}
	clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
	/* create_session negotiated new slot table */
	clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);

	/* Let the state manager reestablish state */
	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		nfs41_setup_state_renewal(clp);
out:
	return status;
}

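/*
 * Resize the fore-channel slot table to target_max_slots (set by a
 * CB_RECALL_SLOT callback), preserving the sequence numbers of the
 * slots that survive the resize.
 */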
static int nfs4_recall_slot(struct nfs_client *clp)
{
	struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
	struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
	struct nfs4_slot *new, *old;
	int i;

	nfs4_begin_drain_session(clp);
	new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
		      GFP_NOFS);
	if (!new)
		return -ENOMEM;

	spin_lock(&fc_tbl->slot_tbl_lock);
	for (i = 0; i < fc_tbl->target_max_slots; i++)
		new[i].seq_nr = fc_tbl->slots[i].seq_nr;
	old = fc_tbl->slots;
	fc_tbl->slots = new;
	fc_tbl->max_slots = fc_tbl->target_max_slots;
	fc_tbl->target_max_slots = 0;
	fc_attrs->max_reqs = fc_tbl->max_slots;
	spin_unlock(&fc_tbl->slot_tbl_lock);

	kfree(old);
	nfs4_end_drain_session(clp);
	return 0;
}

#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */

/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
 * on EXCHANGE_ID for v4.1
 */
static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
{
	switch (status) {
	case -NFS4ERR_CLID_INUSE:
	case -NFS4ERR_STALE_CLIENTID:
		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
		break;
	case -NFS4ERR_DELAY:
	case -ETIMEDOUT:
	case -EAGAIN:
		ssleep(1);
		break;

	case -EKEYEXPIRED:
	case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
				 * in nfs4_exchange_id */
	default:
		return;
	}
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
}

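/*
 * The NFSv4 state manager work loop. Each pass handles the recovery
 * flags in clp->cl_state in a fixed order: re-establish the lease,
 * check the lease, reset the session, reclaim reboot state, reclaim
 * expired (nograce) state, then process delegation returns and slot
 * recalls. It keeps looping until no flags remain set.
 */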
static void nfs4_state_manager(struct nfs_client *clp)
{
	int status = 0;

	/* Ensure exclusive access to NFSv4 state */
	do {
		if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
			/* We're going to have to re-establish a clientid */
			status = nfs4_reclaim_lease(clp);
			if (status) {
				nfs4_set_lease_expired(clp, status);
				if (test_bit(NFS4CLNT_LEASE_EXPIRED,
							&clp->cl_state))
					continue;
				if (clp->cl_cons_state ==
							NFS_CS_SESSION_INITING)
					nfs_mark_client_ready(clp, status);
				goto out_error;
			}
			clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);

			if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH,
					       &clp->cl_state))
				nfs4_state_start_reclaim_nograce(clp);
			else
				set_bit(NFS4CLNT_RECLAIM_REBOOT,
					&clp->cl_state);

			pnfs_destroy_all_layouts(clp);
		}

		if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
			status = nfs4_check_lease(clp);
			if (status < 0)
				goto out_error;
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
				continue;
		}

		/* Initialize or reset the session */
		if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
		    && nfs4_has_session(clp)) {
			status = nfs4_reset_session(clp);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		/* First recover reboot state... */
		if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp,
				clp->cl_mvops->reboot_recovery_ops);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
				continue;
			nfs4_state_end_reclaim_reboot(clp);
			if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		/* Now recover expired state... */
		if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp,
				clp->cl_mvops->nograce_recovery_ops);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
			    test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		nfs4_end_drain_session(clp);
		if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
			nfs_client_return_marked_delegations(clp);
			continue;
		}
		/* Recall session slots */
		if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
		    && nfs4_has_session(clp)) {
			status = nfs4_recall_slot(clp);
			if (status < 0)
				goto out_error;
			continue;
		}

		nfs4_clear_state_manager_bit(clp);
		/* Did we race with an attempt to give us more work? */
		if (clp->cl_state == 0)
			break;
		if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
			break;
	} while (atomic_read(&clp->cl_count) > 1);
	return;
out_error:
	pr_warn_ratelimited("NFS: state manager failed on NFSv4 server %s"
			" with error %d\n", clp->cl_hostname, -status);
	nfs4_end_drain_session(clp);
	nfs4_clear_state_manager_bit(clp);
}

static int nfs4_run_state_manager(void *ptr)
{
	struct nfs_client *clp = ptr;

	allow_signal(SIGKILL);
	nfs4_state_manager(clp);
	nfs_put_client(clp);
	module_put_and_exit(0);
	return 0;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */