// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 *
 * Called with the AIL lock held, but we don't want to assert fail with it
 * held otherwise we'll lock everything up and won't be able to debug the
 * cause. Hence we sample and check the state under the AIL lock and return if
 * everything is fine, otherwise we drop the lock and run the ASSERT checks.
 * Asserts may not be fatal, so pick the lock back up and continue onwards.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
	__must_hold(&ailp->ail_lock)
{
	struct xfs_log_item	*prev_lip;
	struct xfs_log_item	*next_lip;
	xfs_lsn_t		prev_lsn = NULLCOMMITLSN;
	xfs_lsn_t		next_lsn = NULLCOMMITLSN;
	xfs_lsn_t		lsn;
	bool			in_ail;

	if (list_empty(&ailp->ail_head))
		return;

	/*
	 * Sample then check the next and previous entries are valid.
	 */
	in_ail = test_bit(XFS_LI_IN_AIL, &lip->li_flags);
	prev_lip = list_entry(lip->li_ail.prev, struct xfs_log_item, li_ail);
	if (&prev_lip->li_ail != &ailp->ail_head)
		prev_lsn = prev_lip->li_lsn;
	next_lip = list_entry(lip->li_ail.next, struct xfs_log_item, li_ail);
	if (&next_lip->li_ail != &ailp->ail_head)
		next_lsn = next_lip->li_lsn;
	lsn = lip->li_lsn;

	if (in_ail &&
	    (prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0) &&
	    (next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0))
		return;

	spin_unlock(&ailp->ail_lock);
	ASSERT(in_ail);
	ASSERT(prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0);
	ASSERT(next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0);
	spin_lock(&ailp->ail_lock);
}
#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */
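
/*
 * In other words, the invariant checked above is that the AIL is ordered by
 * ascending LSN: for any item that is in the AIL,
 *
 *	XFS_LSN_CMP(prev->li_lsn, lip->li_lsn) <= 0, and
 *	XFS_LSN_CMP(lip->li_lsn, next->li_lsn) <= 0
 *
 * with each comparison skipped when the neighbour is the list head.
 */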

/*
 * Return a pointer to the last item in the AIL. If the AIL is empty, then
 * return NULL.
 */
static struct xfs_log_item *
xfs_ail_max(
	struct xfs_ail		*ailp)
{
	if (list_empty(&ailp->ail_head))
		return NULL;

	return list_entry(ailp->ail_head.prev, struct xfs_log_item, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL. If
 * the given item is the last item in the list, then return NULL.
 */
static struct xfs_log_item *
xfs_ail_next(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	if (lip->li_ail.next == &ailp->ail_head)
		return NULL;

	return list_first_entry(&lip->li_ail, struct xfs_log_item, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log. This is exactly the LSN of the first item in the AIL. If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the
 * first item in the AIL.
 */
static xfs_lsn_t
__xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip = xfs_ail_min(ailp);

	if (lip)
		return lip->li_lsn;
	return 0;
}

xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn;

	spin_lock(&ailp->ail_lock);
	lsn = __xfs_ail_min_lsn(ailp);
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn = 0;
	struct xfs_log_item	*lip;

	spin_lock(&ailp->ail_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->ail_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor. If the cursor
 * was invalidated (indicated by the low bit being set in the cursor's item
 * pointer), restart the traversal.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((uintptr_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}
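
/*
 * A minimal usage sketch of the cursor API (illustrative only; no caller in
 * this file uses exactly this form): walk the AIL in ascending LSN order
 * from the start, tolerating concurrent item removal. The caller is assumed
 * to hold ail_lock, as xfsaild_push() does.
 *
 *	struct xfs_ail_cursor	cur;
 *	struct xfs_log_item	*lip;
 *
 *	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 *	while (lip) {
 *		(process lip; if it is removed from the AIL while the lock
 *		 is dropped, the cursor is invalidated and the call below
 *		 restarts from the AIL minimum)
 *		lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *	}
 *	xfs_trans_ail_cursor_done(&cur);
 */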

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->ail_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((uintptr_t)cur->item | 1);
	}
}
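
/*
 * For illustration: the tag works because log items are allocated with at
 * least word alignment, so bit 0 of a real item pointer is never set.
 * xfs_trans_ail_cursor_next() decodes it with ((uintptr_t)lip & 1) and, on
 * seeing the tag, restarts from the AIL minimum rather than dereferencing
 * the now-stale pointer.
 */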

/*
 * Find the first item in the AIL with an LSN greater than or equal to @lsn
 * by searching in ascending LSN order, and initialise the cursor to point to
 * the next item for an ascending traversal. Pass a @lsn of zero to initialise
 * the cursor to the first item in the AIL. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	list_for_each_entry_reverse(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with an LSN less than or equal to @lsn by
 * searching in descending LSN order, and initialise the cursor to point to
 * that item. If there is no item with an LSN equal to @lsn, the cursor is set
 * to the last item with an LSN lower than @lsn. Returns NULL if the list is
 * empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}
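
/*
 * Sketch of the lookup behaviour (illustrative): with an AIL holding items
 * at LSNs { 10, 20, 20, 30 },
 *
 *	__xfs_trans_ail_cursor_last(ailp, 20)	-> the later of the two
 *						   LSN-20 items
 *	__xfs_trans_ail_cursor_last(ailp, 25)	-> the later LSN-20 item
 *	__xfs_trans_ail_cursor_last(ailp, 5)	-> NULL
 *
 * which is exactly the "insert after" point xfs_ail_splice() needs below.
 */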

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals. This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided. If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (uintptr_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now. Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice. Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go. If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->ail_head);
}
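
/*
 * Worked example (illustrative): splicing a list of items at LSN 20 into an
 * AIL holding { 10, 20, 30 } inserts them after the existing LSN-20 item,
 * preserving insertion order for equal LSNs:
 *
 *	before:	10 -> 20 -> 30
 *	after:	10 -> 20 -> (new items at 20) -> 30
 *
 * If every existing item had an LSN greater than 20, the lookup above would
 * return NULL and the new items would go at the head of the list.
 */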

/*
 * Delete the given item from the AIL, invalidating any cursors that point
 * to it.
 */
static void
xfs_ail_delete(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * Requeue a failed buffer for writeback.
 *
 * We clear the log item failed state here as well, but we have to be careful
 * about reference counts because the only active reference counts on the
 * buffer may be the failed log items. Hence if we clear the log item failed
 * state before queuing the buffer for IO we can release all active references
 * to the buffer and free it, leading to use-after-free problems in
 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items in
 * which order we process them - the buffer is locked, and we own the buffer
 * list so nothing on them is going to change while we are performing this
 * action.
 *
 * Hence we can safely queue the buffer for IO before we clear the failed log
 * item state, therefore always having an active reference to the buffer and
 * avoiding the transient zero-reference state that leads to use-after-free.
 */
static inline int
xfsaild_resubmit_item(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp = lip->li_buf;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	if (!xfs_buf_delwri_queue(bp, buffer_list)) {
		xfs_buf_unlock(bp);
		return XFS_ITEM_FLUSHING;
	}

	/* protected by ail_lock */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (bp->b_flags & _XBF_INODES)
			clear_bit(XFS_LI_FAILED, &lip->li_flags);
		else
			xfs_clear_li_failed(lip);
	}

	xfs_buf_unlock(bp);
	return XFS_ITEM_SUCCESS;
}

static inline uint
xfsaild_push_item(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	/*
	 * If log item pinning is enabled, skip the push and track the item as
	 * pinned. This can help induce head-behind-tail conditions.
	 */
	if (XFS_TEST_ERROR(false, ailp->ail_log->l_mp, XFS_ERRTAG_LOG_ITEM_PIN))
		return XFS_ITEM_PINNED;

	/*
	 * Consider the item pinned if a push callback is not defined so the
	 * caller will force the log. This should only happen for intent items
	 * as they are unpinned once the associated done item is committed to
	 * the on-disk log.
	 */
	if (!lip->li_ops->iop_push)
		return XFS_ITEM_PINNED;
	if (test_bit(XFS_LI_FAILED, &lip->li_flags))
		return xfsaild_resubmit_item(lip, &ailp->ail_buf_list);
	return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}

static long
xfsaild_push(
	struct xfs_ail		*ailp)
{
	struct xfs_mount	*mp = ailp->ail_log->l_mp;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout;
	int			stuck = 0;
	int			flushing = 0;
	int			count = 0;

	/*
	 * If we encountered pinned items or did not finish writing out all
	 * buffers the last time we ran, force a background CIL push to get the
	 * items unpinned in the near future. We do not wait on the CIL push as
	 * that could stall us for seconds if there is enough background IO
	 * load. Stalling for that long when the tail of the log is pinned and
	 * needs flushing will hard stop the transaction subsystem when log
	 * space runs out.
	 */
	if (ailp->ail_log_flush && ailp->ail_last_pushed_lsn == 0 &&
	    (!list_empty_careful(&ailp->ail_buf_list) ||
	     xfs_ail_min_lsn(ailp))) {
		ailp->ail_log_flush = 0;

		XFS_STATS_INC(mp, xs_push_ail_flush);
		xlog_cil_flush(ailp->ail_log);
	}

	spin_lock(&ailp->ail_lock);

	/*
	 * If we have a sync push waiter, we always have to push till the AIL
	 * is empty. Update the target to point to the end of the AIL so that
	 * we capture updates that occur after the sync push waiter has gone
	 * to sleep.
	 */
	if (waitqueue_active(&ailp->ail_empty)) {
		lip = xfs_ail_max(ailp);
		if (lip)
			target = lip->li_lsn;
	} else {
		/* barrier matches the ail_target update in xfs_ail_push() */
		smp_rmb();
		target = ailp->ail_target;
		ailp->ail_target_prev = target;
	}

	/* we're done if the AIL is empty or our push has reached the end */
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
	if (!lip)
		goto out_done;

	XFS_STATS_INC(mp, xs_push_ail);

	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;

		/*
		 * Note that iop_push may unlock and reacquire the AIL lock. We
		 * rely on the AIL cursor implementation to be able to deal with
		 * the dropped lock.
		 */
		lock_result = xfsaild_push_item(ailp, lip);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(mp, xs_push_ail_success);
			trace_xfs_ail_push(lip);

			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_FLUSHING:
			/*
			 * The item or its backing buffer is already being
			 * flushed. The typical reason for that is that an
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
			 */
			XFS_STATS_INC(mp, xs_push_ail_flushing);
			trace_xfs_ail_flushing(lip);

			flushing++;
			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(mp, xs_push_ail_pinned);
			trace_xfs_ail_pinned(lip);

			stuck++;
			ailp->ail_log_flush++;
			break;
		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(mp, xs_push_ail_locked);
			trace_xfs_ail_locked(lip);

			stuck++;
			break;
		default:
			ASSERT(0);
			break;
		}

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 *
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}

out_done:
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
		ailp->ail_log_flush++;

	if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target or the AIL is empty, so wait a bit
		 * longer for I/O to complete and remove pushed items from the
		 * AIL before we start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->ail_last_pushed_lsn = 0;
	} else if (((stuck + flushing) * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress. "Stuck" in this case
		 * is defined as >90% of the items we tried to push were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * restarting from the start of the AIL. This prevents us from
		 * spinning on the same items, and if they are pinned allows
		 * the restart to issue a log force to unpin the stuck items.
		 */
		tout = 20;
		ailp->ail_last_pushed_lsn = 0;
	} else {
		/*
		 * Assume we have more work to do in a short while.
		 */
		tout = 10;
	}

	return tout;
}

static int
xfsaild(
	void			*data)
{
	struct xfs_ail		*ailp = data;
	long			tout = 0;	/* milliseconds */
	unsigned int		noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();
	set_freezable();

	while (1) {
		if (tout && tout <= 20)
			set_current_state(TASK_KILLABLE);
		else
			set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Check kthread_should_stop() after we set the task state to
		 * guarantee that we either see the stop bit and exit or the
		 * task state is reset to runnable such that it's not scheduled
		 * out indefinitely and detects the stop bit at next iteration.
		 * A memory barrier is included in the above task state set to
		 * serialize against kthread_stop().
		 */
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);

			/*
			 * The caller forces out the AIL before stopping the
			 * thread in the common case, which means the delwri
			 * queue is drained. In the shutdown case, the queue may
			 * still hold relogged buffers that haven't been
			 * submitted because they were pinned since added to the
			 * queue.
			 *
			 * Log I/O error processing stales the underlying buffer
			 * and clears the delwri state, expecting the buf to be
			 * removed on the next submission attempt. That won't
			 * happen if we're shutting down, so this is the last
			 * opportunity to release such buffers from the queue.
			 */
			ASSERT(list_empty(&ailp->ail_buf_list) ||
			       xlog_is_shutdown(ailp->ail_log));
			xfs_buf_delwri_cancel(&ailp->ail_buf_list);
			break;
		}

		spin_lock(&ailp->ail_lock);

		/*
		 * Idle if the AIL is empty and we are not racing with a target
		 * update. We check the AIL after we set the task to a sleep
		 * state to guarantee that we either catch an ail_target update
		 * or that a wake_up resets the state to TASK_RUNNING.
		 * Otherwise, we run the risk of sleeping indefinitely.
		 *
		 * The barrier matches the ail_target update in xfs_ail_push().
		 */
		smp_rmb();
		if (!xfs_ail_min(ailp) &&
		    ailp->ail_target == ailp->ail_target_prev &&
		    list_empty(&ailp->ail_buf_list)) {
			spin_unlock(&ailp->ail_lock);
			freezable_schedule();
			tout = 0;
			continue;
		}
		spin_unlock(&ailp->ail_lock);

		if (tout)
			freezable_schedule_timeout(msecs_to_jiffies(tout));

		__set_current_state(TASK_RUNNING);

		try_to_freeze();

		tout = xfsaild_push(ailp);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return 0;
}

/*
 * This routine is called to move the tail of the AIL forward. It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously by the xfsaild thread, which means the
 * caller needs to handle waiting on the async flush for space to become
 * available. We don't want to interrupt a push that is already in progress,
 * so we only update the push target and wake the xfsaild here.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail		*ailp,
	xfs_lsn_t		threshold_lsn)
{
	struct xfs_log_item	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || xlog_is_shutdown(ailp->ail_log) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->ail_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed by the push code before the
	 * wake up. The barriers pair with the smp_rmb() calls in xfsaild()
	 * and xfsaild_push().
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->ail_target, &threshold_lsn);
	smp_wmb();

	wake_up_process(ailp->ail_task);
}

/*
 * Push out all items in the AIL immediately
 */
void
xfs_ail_push_all(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip;
	DEFINE_WAIT(wait);

	spin_lock(&ailp->ail_lock);
	while ((lip = xfs_ail_max(ailp)) != NULL) {
		prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE);
		wake_up_process(ailp->ail_task);
		spin_unlock(&ailp->ail_lock);
		schedule();
		spin_lock(&ailp->ail_lock);
	}
	spin_unlock(&ailp->ail_lock);

	finish_wait(&ailp->ail_empty, &wait);
}

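/*
 * Finish an AIL update that may have changed the tail LSN. If the minimum
 * LSN in the AIL has changed, update the log tail, wake anyone waiting in
 * xfs_ail_push_all_sync() if the AIL is now empty, and wake log space
 * waiters. @old_lsn is the tail LSN sampled before the update; pass 0 when
 * the tail cannot have changed. Called with the AIL lock held; drops it.
 */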
void
xfs_ail_update_finish(
	struct xfs_ail		*ailp,
	xfs_lsn_t		old_lsn) __releases(ailp->ail_lock)
{
	struct xlog		*log = ailp->ail_log;

	/* if the tail lsn hasn't changed, don't do updates or wakeups. */
	if (!old_lsn || old_lsn == __xfs_ail_min_lsn(ailp)) {
		spin_unlock(&ailp->ail_lock);
		return;
	}

	if (!xlog_is_shutdown(log))
		xlog_assign_tail_lsn_locked(log->l_mp);

	if (list_empty(&ailp->ail_head))
		wake_up_all(&ailp->ail_empty);
	spin_unlock(&ailp->ail_lock);
	xfs_log_space_wake(log->l_mp);
}

/*
 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
 *
 * xfs_trans_ail_update_bulk takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added. Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function executes the update operations on all the items in the array
 * under a single acquisition of the AIL lock. Once we have the AIL lock, we
 * need to check each log item LSN to confirm it needs to be moved forward in
 * the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->ail_lock)
{
	struct xfs_log_item	*mlip;
	xfs_lsn_t		tail_lsn = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (test_and_set_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			trace_xfs_ail_move(lip, lip->li_lsn, lsn);
			if (mlip == lip && !tail_lsn)
				tail_lsn = lip->li_lsn;

			xfs_ail_delete(ailp, lip);
		} else {
			trace_xfs_ail_insert(lip, 0, lsn);
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	xfs_ail_update_finish(ailp, tail_lsn);
}

/* Insert a log item into the AIL. */
void
xfs_trans_ail_insert(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}
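
/*
 * For illustration, a sketch of how a log item type might call the insert
 * helper above once its commit record has hit the log (the xfs_foo_* names
 * are hypothetical, not part of XFS): the item is placed in the AIL at its
 * commit LSN, from where the xfsaild will push it.
 *
 *	static xfs_lsn_t
 *	xfs_foo_item_committed(
 *		struct xfs_log_item	*lip,
 *		xfs_lsn_t		lsn)
 *	{
 *		// xfs_trans_ail_insert() takes ail_lock, and the bulk
 *		// update it wraps drops the lock again before returning
 *		xfs_trans_ail_insert(lip->li_ailp, lip, lsn);
 *		return lsn;
 *	}
 */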

/*
 * Delete one log item from the AIL.
 *
 * If this item was at the tail of the AIL, return the LSN of the log item so
 * that we can use it to check if the LSN of the tail of the log has moved
 * when finishing up the AIL delete process in xfs_ail_update_finish().
 */
xfs_lsn_t
xfs_ail_delete_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item	*mlip = xfs_ail_min(ailp);
	xfs_lsn_t		lsn = lip->li_lsn;

	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
	xfs_ail_delete(ailp, lip);
	clear_bit(XFS_LI_IN_AIL, &lip->li_flags);
	lip->li_lsn = 0;

	if (mlip == lip)
		return lsn;
	return 0;
}

void
xfs_trans_ail_delete(
	struct xfs_log_item	*lip,
	int			shutdown_type)
{
	struct xfs_ail		*ailp = lip->li_ailp;
	struct xlog		*log = ailp->ail_log;
	xfs_lsn_t		tail_lsn;

	spin_lock(&ailp->ail_lock);
	if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
		spin_unlock(&ailp->ail_lock);
		if (shutdown_type && !xlog_is_shutdown(log)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_AILDELETE,
	"%s: attempting to delete a log item that is not in the AIL",
					__func__);
			xlog_force_shutdown(log, shutdown_type);
		}
		return;
	}

	/* xfs_ail_update_finish() drops the AIL lock */
	xfs_clear_li_failed(lip);
	tail_lsn = xfs_ail_delete_one(ailp, lip);
	xfs_ail_update_finish(ailp, tail_lsn);
}

int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return -ENOMEM;

	ailp->ail_log = mp->m_log;
	INIT_LIST_HEAD(&ailp->ail_head);
	INIT_LIST_HEAD(&ailp->ail_cursors);
	spin_lock_init(&ailp->ail_lock);
	INIT_LIST_HEAD(&ailp->ail_buf_list);
	init_waitqueue_head(&ailp->ail_empty);

	ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
				     mp->m_super->s_id);
	if (IS_ERR(ailp->ail_task))
		goto out_free_ailp;

	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kmem_free(ailp);
	return -ENOMEM;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	kthread_stop(ailp->ail_task);
	kmem_free(ailp);
}