// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/checkpoint.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1999 Red Hat Software --- All Rights Reserved
 *
 * Checkpoint routines for the generic filesystem journaling code.
 * Part of the ext2fs journaling system.
 *
 * Checkpointing is the process of ensuring that a section of the log is
 * committed fully to disk, so that that portion of the log can be
 * reused.
 */
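
/*
 * Overview of the data structures used below: the journal keeps a circular,
 * doubly-linked list of checkpointing transactions on
 * journal->j_checkpoint_transactions (linked through t_cpnext/t_cpprev), and
 * each such transaction keeps its not-yet-checkpointed buffers on its own
 * circular list, transaction->t_checkpoint_list (journal_heads linked through
 * b_cpnext/b_cpprev). Both lists are protected by j_list_lock.
 */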

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <trace/events/jbd2.h>

/*
 * Unlink a buffer from a transaction checkpoint list.
 *
 * Called with j_list_lock held.
 */
static inline void __buffer_unlink(struct journal_head *jh)
{
	transaction_t *transaction = jh->b_cp_transaction;

	jh->b_cpnext->b_cpprev = jh->b_cpprev;
	jh->b_cpprev->b_cpnext = jh->b_cpnext;
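	/*
	 * If jh was the list head, advance the head. When jh was the only
	 * buffer on the circular list, b_cpnext still points back at jh
	 * itself, so the list becomes empty.
	 */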
	if (transaction->t_checkpoint_list == jh) {
		transaction->t_checkpoint_list = jh->b_cpnext;
		if (transaction->t_checkpoint_list == jh)
			transaction->t_checkpoint_list = NULL;
	}
}

/*
 * __jbd2_log_wait_for_space: wait until there is space in the journal.
 *
 * Called under j_state_lock *only*. It will be unlocked if we have to wait
 * for a checkpoint to free up some space in the log.
 */
void __jbd2_log_wait_for_space(journal_t *journal)
__acquires(&journal->j_state_lock)
__releases(&journal->j_state_lock)
{
	int nblocks, space_left;
	/* assert_spin_locked(&journal->j_state_lock); */

	nblocks = journal->j_max_transaction_buffers;
	while (jbd2_log_space_left(journal) < nblocks) {
		write_unlock(&journal->j_state_lock);
		mutex_lock_io(&journal->j_checkpoint_mutex);

		/*
		 * Test again, another process may have checkpointed while we
		 * were waiting for the checkpoint lock. If there are no
		 * transactions ready to be checkpointed, try to recover
		 * journal space by calling cleanup_journal_tail(), and if
		 * that doesn't work, by waiting for the currently committing
		 * transaction to complete. If there is absolutely no way
		 * to make progress, this is either a BUG or corrupted
		 * filesystem, so abort the journal and leave a stack
		 * trace for forensic evidence.
		 */
		write_lock(&journal->j_state_lock);
		if (journal->j_flags & JBD2_ABORT) {
			mutex_unlock(&journal->j_checkpoint_mutex);
			return;
		}
		spin_lock(&journal->j_list_lock);
		space_left = jbd2_log_space_left(journal);
		if (space_left < nblocks) {
			int chkpt = journal->j_checkpoint_transactions != NULL;
			tid_t tid = 0;

			if (journal->j_committing_transaction)
				tid = journal->j_committing_transaction->t_tid;
			spin_unlock(&journal->j_list_lock);
			write_unlock(&journal->j_state_lock);
			if (chkpt) {
				jbd2_log_do_checkpoint(journal);
			} else if (jbd2_cleanup_journal_tail(journal) == 0) {
				/* We were able to recover space; yay! */
				;
			} else if (tid) {
				/*
				 * jbd2_journal_commit_transaction() may want
				 * to take the checkpoint_mutex if JBD2_FLUSHED
				 * is set. So we need to temporarily drop it.
				 */
				mutex_unlock(&journal->j_checkpoint_mutex);
				jbd2_log_wait_commit(journal, tid);
				write_lock(&journal->j_state_lock);
				continue;
			} else {
				printk(KERN_ERR "%s: needed %d blocks and "
				       "only had %d space available\n",
				       __func__, nblocks, space_left);
				printk(KERN_ERR "%s: no way to get more "
				       "journal space in %s\n", __func__,
				       journal->j_devname);
				WARN_ON(1);
				jbd2_journal_abort(journal, -EIO);
			}
			write_lock(&journal->j_state_lock);
		} else {
			spin_unlock(&journal->j_list_lock);
		}
		mutex_unlock(&journal->j_checkpoint_mutex);
	}
}
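
/*
 * Illustrative sketch (not part of this file): a caller that already holds
 * j_state_lock for writing and notices the log is tight would do roughly
 *
 *	if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers)
 *		__jbd2_log_wait_for_space(journal);
 *
 * relying on this function to drop and re-take j_state_lock while a
 * checkpoint or commit makes room in the log.
 */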

static void
__flush_batch(journal_t *journal, int *batch_count)
{
	int i;
	struct blk_plug plug;

	blk_start_plug(&plug);
	for (i = 0; i < *batch_count; i++)
		write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
	blk_finish_plug(&plug);

	for (i = 0; i < *batch_count; i++) {
		struct buffer_head *bh = journal->j_chkpt_bhs[i];
		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
		journal->j_chkpt_bhs[i] = NULL;
	}
	*batch_count = 0;
}

/*
 * Perform an actual checkpoint. We take the first transaction on the
 * list of transactions to be checkpointed and send all its buffers
 * to disk. We submit larger chunks of data at once.
 *
 * The journal should be locked before calling this function.
 * Called with j_checkpoint_mutex held.
 */
int jbd2_log_do_checkpoint(journal_t *journal)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	transaction_t *transaction;
	tid_t this_tid;
	int result, batch_count = 0;

	jbd2_debug(1, "Start checkpoint\n");

	/*
	 * First thing: if there are any transactions in the log which
	 * don't need checkpointing, just eliminate them from the
	 * journal straight away.
	 */
	result = jbd2_cleanup_journal_tail(journal);
	trace_jbd2_checkpoint(journal, result);
	jbd2_debug(1, "cleanup_journal_tail returned %d\n", result);
	if (result <= 0)
		return result;

	/*
	 * OK, we need to start writing disk blocks. Take one transaction
	 * and write it.
	 */
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions)
		goto out;
	transaction = journal->j_checkpoint_transactions;
	if (transaction->t_chp_stats.cs_chp_time == 0)
		transaction->t_chp_stats.cs_chp_time = jiffies;
	this_tid = transaction->t_tid;
restart:
	/*
	 * If someone cleaned up this transaction while we slept, we're
	 * done (maybe it's a new transaction, but it fell at the same
	 * address).
	 */
	if (journal->j_checkpoint_transactions != transaction ||
	    transaction->t_tid != this_tid)
		goto out;

	/* checkpoint all of the transaction's buffers */
	while (transaction->t_checkpoint_list) {
		jh = transaction->t_checkpoint_list;
		bh = jh2bh(jh);

		if (jh->b_transaction != NULL) {
			transaction_t *t = jh->b_transaction;
			tid_t tid = t->t_tid;

			transaction->t_chp_stats.cs_forced_to_close++;
			spin_unlock(&journal->j_list_lock);
			if (unlikely(journal->j_flags & JBD2_UNMOUNT))
				/*
				 * The journal thread is dead; so
				 * starting and waiting for a commit
				 * to finish will cause us to wait for
				 * a _very_ long time.
				 */
				printk(KERN_ERR
				       "JBD2: %s: Waiting for Godot: block %llu\n",
				       journal->j_devname, (unsigned long long) bh->b_blocknr);

			if (batch_count)
				__flush_batch(journal, &batch_count);
			jbd2_log_start_commit(journal, tid);
			/*
			 * jbd2_journal_commit_transaction() may want
			 * to take the checkpoint_mutex if JBD2_FLUSHED
			 * is set, jbd2_update_log_tail() called by
			 * jbd2_journal_commit_transaction() may also take
			 * checkpoint_mutex. So we need to temporarily
			 * drop it.
			 */
			mutex_unlock(&journal->j_checkpoint_mutex);
			jbd2_log_wait_commit(journal, tid);
			mutex_lock_io(&journal->j_checkpoint_mutex);
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		if (!trylock_buffer(bh)) {
			/*
			 * The buffer is locked: it may be under writeback,
			 * still being flushed out from a previous pass, or
			 * being re-added to a new transaction. Wait for it
			 * to be unlocked and then check it again.
			 */
			get_bh(bh);
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			/* the journal_head may have gone by now */
			BUFFER_TRACE(bh, "brelse");
			__brelse(bh);
			goto retry;
		} else if (!buffer_dirty(bh)) {
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "remove from checkpoint");
			/*
			 * If the transaction was released or the checkpoint
			 * list was empty, we're done.
			 */
			if (__jbd2_journal_remove_checkpoint(jh) ||
			    !transaction->t_checkpoint_list)
				goto out;
		} else {
			unlock_buffer(bh);
			/*
			 * We are about to write the buffer. Once we drop
			 * j_list_lock it could be raced by transaction
			 * shrinking or buffer re-logging, so leave it on the
			 * checkpoint list and check its state again later to
			 * make sure it is clean.
			 */
			BUFFER_TRACE(bh, "queue");
			get_bh(bh);
			J_ASSERT_BH(bh, !buffer_jwrite(bh));
			journal->j_chkpt_bhs[batch_count++] = bh;
			transaction->t_chp_stats.cs_written++;
			transaction->t_checkpoint_list = jh->b_cpnext;
		}

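		/*
		 * Flush the batch early if it is full, if we should give up
		 * the CPU or the lock, or if the scan has wrapped around to
		 * the first buffer we queued in this batch.
		 */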
		if ((batch_count == JBD2_NR_BATCH) ||
		    need_resched() || spin_needbreak(&journal->j_list_lock) ||
		    jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0])
			goto unlock_and_flush;
	}

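	/*
	 * We reach this code either by falling out of the scan loop
	 * (possibly with a partially filled batch) or by jumping to the
	 * unlock_and_flush/retry labels from inside it. After the batch has
	 * been submitted we re-take j_list_lock and restart the scan,
	 * because the checkpoint list may have changed while it was
	 * unlocked.
	 */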
	if (batch_count) {
		unlock_and_flush:
			spin_unlock(&journal->j_list_lock);
		retry:
			if (batch_count)
				__flush_batch(journal, &batch_count);
			spin_lock(&journal->j_list_lock);
			goto restart;
	}

out:
	spin_unlock(&journal->j_list_lock);
	result = jbd2_cleanup_journal_tail(journal);

	return (result < 0) ? result : 0;
}
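
/*
 * Illustrative sketch (not part of this file): callers such as
 * __jbd2_log_wait_for_space() above, or the journal flushing path,
 * typically drive this function in a loop along the lines of
 *
 *	spin_lock(&journal->j_list_lock);
 *	while (!err && journal->j_checkpoint_transactions) {
 *		spin_unlock(&journal->j_list_lock);
 *		mutex_lock_io(&journal->j_checkpoint_mutex);
 *		err = jbd2_log_do_checkpoint(journal);
 *		mutex_unlock(&journal->j_checkpoint_mutex);
 *		spin_lock(&journal->j_list_lock);
 *	}
 *	spin_unlock(&journal->j_list_lock);
 *
 * i.e. each call checkpoints at most one transaction, so the caller loops
 * until the checkpoint list is empty or an error is returned.
 */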

/*
 * Check the list of checkpoint transactions for the journal to see if
 * we have already got rid of any since the last update of the log tail
 * in the journal superblock. If so, we can instantly roll the
 * superblock forward to remove those transactions from the log.
 *
 * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
 *
 * Called with the journal lock held.
 *
 * This is the only part of the journaling code which really needs to be
 * aware of transaction aborts. Checkpointing involves writing to the
 * main filesystem area rather than to the journal, so it can proceed
 * even in abort state, but we must not update the super block if
 * checkpointing may have failed. Otherwise, we would lose some metadata
 * buffers which should be written-back to the filesystem.
 */

int jbd2_cleanup_journal_tail(journal_t *journal)
{
	tid_t first_tid;
	unsigned long blocknr;

	if (is_journal_aborted(journal))
		return -EIO;

	if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
		return 1;
	J_ASSERT(blocknr != 0);

	/*
	 * We need to make sure that any blocks that were recently written out
	 * --- perhaps by jbd2_log_do_checkpoint() --- are flushed out before
	 * we drop the transactions from the journal. It's unlikely this will
	 * be necessary, especially with an appropriately sized journal, but we
	 * need this to guarantee correctness. Fortunately
	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
	 */
	if (journal->j_flags & JBD2_BARRIER)
		blkdev_issue_flush(journal->j_fs_dev);

	return __jbd2_update_log_tail(journal, first_tid, blocknr);
}


/* Checkpoint list management */

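/*
 * SHRINK_DESTROY removes every buffer from the list unconditionally,
 * SHRINK_BUSY_STOP stops scanning at the first buffer that is still in use,
 * and SHRINK_BUSY_SKIP skips busy buffers and keeps scanning the rest of the
 * list (see journal_shrink_one_cp_list() below).
 */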
enum shrink_type {SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP};

/*
 * journal_shrink_one_cp_list
 *
 * Find all the written-back checkpoint buffers in the given list
 * and try to release them. If the whole transaction is released, set
 * the 'released' parameter. Return the number of released checkpointed
 * buffers.
 *
 * Called with j_list_lock held.
 */
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
						enum shrink_type type,
						bool *released)
{
	struct journal_head *last_jh;
	struct journal_head *next_jh = jh;
	unsigned long nr_freed = 0;
	int ret;

	*released = false;
	if (!jh)
		return 0;

	last_jh = jh->b_cpprev;
	do {
		jh = next_jh;
		next_jh = jh->b_cpnext;

		if (type == SHRINK_DESTROY) {
			ret = __jbd2_journal_remove_checkpoint(jh);
		} else {
			ret = jbd2_journal_try_remove_checkpoint(jh);
			if (ret < 0) {
				if (type == SHRINK_BUSY_SKIP)
					continue;
				break;
			}
		}

		nr_freed++;
		if (ret) {
			*released = true;
			break;
		}

		if (need_resched())
			break;
	} while (jh != last_jh);

	return nr_freed;
}

/*
 * jbd2_journal_shrink_checkpoint_list
 *
 * Find 'nr_to_scan' written-back checkpoint buffers in the journal
 * and try to release them. Return the number of released checkpointed
 * buffers.
 *
 * Takes and releases j_list_lock internally.
 */
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
						  unsigned long *nr_to_scan)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	bool __maybe_unused released;
	tid_t first_tid = 0, last_tid = 0, next_tid = 0;
	tid_t tid = 0;
	unsigned long nr_freed = 0;
	unsigned long freed;

again:
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions) {
		spin_unlock(&journal->j_list_lock);
		goto out;
	}

	/*
	 * Get the next transaction to shrink: either resume the previous
	 * scan or start over again. If someone else checkpoints and drops
	 * transactions from the checkpoint list in the meantime, we ignore
	 * the saved j_shrink_transaction and start over unconditionally.
	 */
	if (journal->j_shrink_transaction)
		transaction = journal->j_shrink_transaction;
	else
		transaction = journal->j_checkpoint_transactions;

	if (!first_tid)
		first_tid = transaction->t_tid;
	last_transaction = journal->j_checkpoint_transactions->t_cpprev;
	next_transaction = transaction;
	last_tid = last_transaction->t_tid;
	do {
		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		tid = transaction->t_tid;

		freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
						   SHRINK_BUSY_SKIP, &released);
		nr_freed += freed;
		(*nr_to_scan) -= min(*nr_to_scan, freed);
		if (*nr_to_scan == 0)
			break;
		if (need_resched() || spin_needbreak(&journal->j_list_lock))
			break;
	} while (transaction != last_transaction);

	if (transaction != last_transaction) {
		journal->j_shrink_transaction = next_transaction;
		next_tid = next_transaction->t_tid;
	} else {
		journal->j_shrink_transaction = NULL;
		next_tid = 0;
	}

	spin_unlock(&journal->j_list_lock);
	cond_resched();

	if (*nr_to_scan && next_tid)
		goto again;
out:
	trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
					  nr_freed, next_tid);

	return nr_freed;
}
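
/*
 * Illustrative note (not part of this file): this is the backend for the
 * journal's memory shrinker. A shrinker scan callback (registered
 * elsewhere, e.g. in journal.c) would call it along the lines of
 *
 *	unsigned long nr_to_scan = sc->nr_to_scan;
 *	freed = jbd2_journal_shrink_checkpoint_list(journal, &nr_to_scan);
 *
 * so nr_to_scan is passed by reference and decremented as checkpointed
 * buffers are released.
 */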

/*
 * journal_clean_checkpoint_list
 *
 * Find all the written-back checkpoint buffers in the journal and release them.
 * If 'destroy' is set, release all buffers unconditionally.
 *
 * Called with j_list_lock held.
 */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	enum shrink_type type;
	bool released;

	transaction = journal->j_checkpoint_transactions;
	if (!transaction)
		return;

	type = destroy ? SHRINK_DESTROY : SHRINK_BUSY_STOP;
	last_transaction = transaction->t_cpprev;
	next_transaction = transaction;
	do {
		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		journal_shrink_one_cp_list(transaction->t_checkpoint_list,
					   type, &released);
		/*
		 * This function only frees up some memory if possible so we
		 * don't have an obligation to finish processing. Bail out if
		 * preemption is requested:
		 */
		if (need_resched())
			return;
		/*
		 * Stop scanning if we couldn't free the transaction. This
		 * avoids pointless scanning of transactions which still
		 * weren't checkpointed.
		 */
		if (!released)
			return;
	} while (transaction != last_transaction);
}

/*
 * Remove buffers from all checkpoint lists as the journal is aborted and we
 * just need to free memory.
 */
void jbd2_journal_destroy_checkpoint(journal_t *journal)
{
	/*
	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
	 * early because it needs to reschedule.
	 */
	while (1) {
		spin_lock(&journal->j_list_lock);
		if (!journal->j_checkpoint_transactions) {
			spin_unlock(&journal->j_list_lock);
			break;
		}
		__jbd2_journal_clean_checkpoint_list(journal, true);
		spin_unlock(&journal->j_list_lock);
		cond_resched();
	}
}

/*
 * journal_remove_checkpoint: called after a buffer has been committed
 * to disk (either by being written back to disk, or by being
 * committed to the log).
 *
 * We cannot safely clean a transaction out of the log until all of the
 * buffer updates committed in that transaction have safely been stored
 * elsewhere on disk. To achieve this, all of the buffers in a
 * transaction need to be maintained on the transaction's checkpoint
 * lists until they have been rewritten, at which point this function is
 * called to remove the buffer from the existing transaction's
 * checkpoint lists.
 *
 * The function returns 1 if it frees the transaction, 0 otherwise.
 * The function can free jh and bh.
 *
 * This function is called with j_list_lock held.
 */
int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
{
	struct transaction_chp_stats_s *stats;
	transaction_t *transaction;
	journal_t *journal;
	struct buffer_head *bh = jh2bh(jh);

	JBUFFER_TRACE(jh, "entry");

	transaction = jh->b_cp_transaction;
	if (!transaction) {
		JBUFFER_TRACE(jh, "not on transaction");
		return 0;
	}
	journal = transaction->t_journal;

	JBUFFER_TRACE(jh, "removing from transaction");

	/*
	 * If we have failed to write the buffer out to disk, the filesystem
	 * may become inconsistent. We cannot abort the journal here since
	 * we hold j_list_lock and we have to be careful about races with
	 * jbd2_journal_destroy(). So mark the writeback IO error in the
	 * journal here and we abort the journal later from a better context.
	 */
	if (buffer_write_io_error(bh))
		set_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags);

	__buffer_unlink(jh);
	jh->b_cp_transaction = NULL;
	percpu_counter_dec(&journal->j_checkpoint_jh_count);
	jbd2_journal_put_journal_head(jh);

	/* Is this transaction empty? */
	if (transaction->t_checkpoint_list)
		return 0;

	/*
	 * There is one special case to worry about: if we have just pulled the
	 * buffer off a running or committing transaction's checkpoint list,
	 * then even if the checkpoint list is empty, the transaction obviously
	 * cannot be dropped!
	 *
	 * The locking here around t_state is a bit sleazy.
	 * See the comment at the end of jbd2_journal_commit_transaction().
	 */
	if (transaction->t_state != T_FINISHED)
		return 0;

	/*
	 * OK, that was the last buffer for the transaction, we can now
	 * safely remove this transaction from the log.
	 */
	stats = &transaction->t_chp_stats;
	if (stats->cs_chp_time)
		stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time,
						    jiffies);
	trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev,
				    transaction->t_tid, stats);

	__jbd2_journal_drop_transaction(journal, transaction);
	jbd2_journal_free_transaction(transaction);
	return 1;
}

/*
 * Check the checkpoint buffer and try to remove it from the checkpoint
 * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if
 * it frees the transaction, 0 otherwise.
 *
 * This function is called with j_list_lock held.
 */
int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_transaction)
		return -EBUSY;
	if (!trylock_buffer(bh))
		return -EBUSY;
	if (buffer_dirty(bh)) {
		unlock_buffer(bh);
		return -EBUSY;
	}
	unlock_buffer(bh);

	/*
	 * Buffer is clean and the IO has finished (we held the buffer
	 * lock) so the checkpoint is done. We can safely remove the
	 * buffer from this transaction.
	 */
	JBUFFER_TRACE(jh, "remove from checkpoint list");
	return __jbd2_journal_remove_checkpoint(jh);
}

/*
 * journal_insert_checkpoint: put a committed buffer onto a checkpoint
 * list so that we know when it is safe to clean the transaction out of
 * the log.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */
void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
				      transaction_t *transaction)
{
	JBUFFER_TRACE(jh, "entry");
	J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);

	/* Get reference for checkpointing transaction */
	jbd2_journal_grab_journal_head(jh2bh(jh));
	jh->b_cp_transaction = transaction;

	if (!transaction->t_checkpoint_list) {
		jh->b_cpnext = jh->b_cpprev = jh;
	} else {
		jh->b_cpnext = transaction->t_checkpoint_list;
		jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
		jh->b_cpprev->b_cpnext = jh;
		jh->b_cpnext->b_cpprev = jh;
	}
	transaction->t_checkpoint_list = jh;
	percpu_counter_inc(&transaction->t_journal->j_checkpoint_jh_count);
}
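
/*
 * Note on the list shape: the new buffer is linked in just before the old
 * head and then becomes the new head, so the circular checkpoint list reads
 * newest-to-oldest when walked from transaction->t_checkpoint_list via
 * b_cpnext.
 */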

/*
 * We've finished with this transaction structure: adios...
 *
 * The transaction must have no links except for the checkpoint by this
 * point.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */

void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
{
	assert_spin_locked(&journal->j_list_lock);

	journal->j_shrink_transaction = NULL;
	if (transaction->t_cpnext) {
		transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
		transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions =
				transaction->t_cpnext;
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions = NULL;
	}

	J_ASSERT(transaction->t_state == T_FINISHED);
	J_ASSERT(transaction->t_buffers == NULL);
	J_ASSERT(transaction->t_forget == NULL);
	J_ASSERT(transaction->t_shadow_list == NULL);
	J_ASSERT(transaction->t_checkpoint_list == NULL);
	J_ASSERT(atomic_read(&transaction->t_updates) == 0);
	J_ASSERT(journal->j_committing_transaction != transaction);
	J_ASSERT(journal->j_running_transaction != transaction);

	trace_jbd2_drop_transaction(journal, transaction);

	jbd2_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
}
