/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>
#include <asm/system.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}
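
/*
 * Note that this handler only records the IO result in the buffer's
 * uptodate bit and unlocks the buffer; it drops no references.  The
 * waiting side of the protocol lives in the commit loops below, which
 * test buffer_locked(), call wait_on_buffer() and then inspect
 * buffer_uptodate() to detect IO errors.
 */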

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}
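
/*
 * The b_count != 1 test above is what makes the free attempt safe: the
 * caller's reference is the only one left, and we take a page reference
 * before dropping it, so the page cannot go away underneath
 * try_to_free_buffers().
 */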

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec now = current_kernel_time();

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type 	= JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
	}

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
	else
		ret = submit_bh(WRITE_SYNC, bh);

	*cbh = bh;
	return ret;
}
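
/*
 * In the barrier case above, WRITE_FLUSH_FUA makes the commit block both
 * a cache flush and a forced-unit-access write: the device flushes the
 * previously written journal blocks before the commit record, and the
 * commit record itself is durable when the IO completes.  With async
 * commit, ordering is instead enforced by an explicit blkdev_issue_flush()
 * in jbd2_journal_commit_transaction() (and only if JBD2_BARRIER is set).
 */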

/*
 * This function, together with journal_submit_commit_record(),
 * allows the commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}
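
/*
 * journal_submit_commit_record() and journal_wait_on_commit_record() form
 * a pair: submission takes the descriptor buffer (and its journal_head)
 * from jbd2_journal_get_descriptor_buffer(), and the wait side drops both
 * references once the write completes, returning -EIO if the commit block
 * did not reach the disk intact.
 */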

/*
 * Write the filemap data using the writepage() address_space_operation.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}
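
/*
 * The writeback_control above bounds the work done here: range_end at
 * i_size avoids touching pages beyond EOF, and nr_to_write of twice the
 * page count is a generous upper bound (presumably leaving slack for
 * pages dirtied while the scan runs) rather than an exact accounting.
 */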

/*
 * Submit all the data buffers of inodes associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * are currently operating on from being released while we write out its
 * pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers. We use writepage
		 * instead of writepages because writepages can do
		 * block allocation with delalloc, and we need to write
		 * only already-allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		commit_transaction->t_flushed_data_blocks = 1;
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
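
/*
 * The lock choreography above is a common jbd2 pattern: j_list_lock is
 * dropped around the (possibly sleeping) writeback call, and the
 * __JI_COMMIT_RUNNING bit keeps the jbd2_inode pinned for that window.
 * The list walk itself stays safe because the transaction is committing,
 * so nothing can be added to t_inode_list; waiters on the bit are woken
 * with wake_up_bit() after the paired memory barrier.
 */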

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * filemap_fdatawait_range(), set it again so
			 * that user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}
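
/*
 * The commit path chains these calls into a single running checksum:
 *
 *	__u32 crc = ~0;				(seed, see the commit loop)
 *	crc = jbd2_checksum_data(crc, bh1);	(fold in each journal block,
 *	crc = jbd2_checksum_data(crc, bh2);	 in submission order)
 *
 * The final value is stored in the commit record's h_chksum[0], so
 * recovery can recompute the checksum block by block and compare it
 * against what was committed.
 */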

static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
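
/*
 * With 64-bit tags, a block number is split across two big-endian 32-bit
 * fields.  For example, block 0x123456789 is stored as:
 *
 *	t_blocknr      = cpu_to_be32(0x23456789);	(low 32 bits)
 *	t_blocknr_high = cpu_to_be32(0x1);		(high 32 bits)
 *
 * The (block >> 31) >> 1 double shift presumably exists to avoid the
 * undefined behaviour a plain ">> 32" would have on a 32-bit type; for
 * the 64-bit value used here it is simply equivalent to block >> 32.
 */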

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
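/*
 * Rough map of the jbd_debug() phase markers below:
 *
 *   phase 1: lock down the running transaction, wait for its updates to
 *	      finish, discard unused reserved buffers, clean checkpoint lists
 *   phase 2: write data buffers, revoke records and journal metadata
 *   phase 3: wait for the temporary metadata IO buffers to complete
 *   phase 4: wait for the revoke/descriptor control buffers
 *   phase 5: write and wait for the commit record
 *   phase 6: checkpoint bookkeeping for the forget list
 *   phase 7: statistics and transaction teardown
 */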
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i, to_free = 0;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_locked = jiffies;
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it
	 * potentially frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);
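
	/*
	 * blk_start_plug()/blk_finish_plug() batch the revoke-record bios
	 * in a per-task plug list so the block layer can merge and dispatch
	 * them together instead of one request at a time; the metadata
	 * writeout loop below is plugged for the same reason.
	 */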
	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	descriptor = NULL;
	bufs = 0;
	blk_start_plug(&plug);
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();
			stats.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}
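
	/*
	 * Each full descriptor block that just went to disk has the layout:
	 *
	 *	journal_header_t		(magic, DESCRIPTOR_BLOCK, tid)
	 *	tag 0 + 16-byte journal UUID	(first tag only)
	 *	tag 1 ... tag N			(last one flagged LAST_TAG)
	 *
	 * which is why the loop above conservatively keeps tag_bytes + 16
	 * bytes of slack when deciding whether another tag still fits.
	 */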

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record
	 */
	if (commit_transaction->t_flushed_data_blocks &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD: commit phase 3\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}
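
	/*
	 * Each pass of the loop above retired one BJ_IO/BJ_Shadow pair: the
	 * temporary buffer_head that actually hit the log was torn down and
	 * freed, while its shadowed counterpart (the real metadata buffer)
	 * was refiled to BJ_Forget for checkpointing and any waiters on
	 * BH_Unshadow were woken.
	 */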

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD: commit phase 5\n");

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

	jbd_debug(3, "JBD: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list, we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future after the "add to orphan"
		 * operation has been committed.  That's not only a performance
		 * gain, it also stops aliasing problems if the buffer is
		 * left behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* A buffer on the BJ_Forget list that is not jbddirty
			 * has been freed by this transaction and hence could
			 * not have been reallocated until this transaction
			 * has committed. *BUT* it could be reallocated once
			 * we have written all the data to disk and before
			 * we process the buffer on the BJ_Forget list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				 /* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * Weight the existing average higher than the latest commit time so
	 * we don't react too strongly to vast changes in the commit time.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;
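
	/*
	 * i.e. new_avg = (commit_time + 3 * old_avg) / 4, a 1/4-weight
	 * exponential moving average.  For example, old_avg = 100ms and
	 * commit_time = 200ms give new_avg = (200 + 300) / 4 = 125ms.
	 */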
	write_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		to_free = 1;
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);
	if (to_free)
		kfree(commit_transaction);

	wake_up(&journal->j_wait_done_commit);
}