Lines matching refs:bh in fs/buffer.c (a short usage sketch of the matched helpers follows the listing)

57 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
62 inline void touch_buffer(struct buffer_head *bh) in touch_buffer() argument
64 trace_block_touch_buffer(bh); in touch_buffer()
65 folio_mark_accessed(bh->b_folio); in touch_buffer()
69 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
71 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
75 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
77 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
79 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
91 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
104 bh = head; in buffer_check_dirty_writeback()
106 if (buffer_locked(bh)) in buffer_check_dirty_writeback()
109 if (buffer_dirty(bh)) in buffer_check_dirty_writeback()
112 bh = bh->b_this_page; in buffer_check_dirty_writeback()
113 } while (bh != head); in buffer_check_dirty_writeback()
121 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
123 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
127 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error() argument
129 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
132 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
143 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
146 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
149 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
151 unlock_buffer(bh); in __end_buffer_read_notouch()
158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
160 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
161 put_bh(bh); in end_buffer_read_sync()
165 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
168 set_buffer_uptodate(bh); in end_buffer_write_sync()
170 buffer_io_error(bh, ", lost sync page write"); in end_buffer_write_sync()
171 mark_buffer_write_io_error(bh); in end_buffer_write_sync()
172 clear_buffer_uptodate(bh); in end_buffer_write_sync()
174 unlock_buffer(bh); in end_buffer_write_sync()
175 put_bh(bh); in end_buffer_write_sync()
196 struct buffer_head *bh; in __find_get_block_slow() local
211 bh = head; in __find_get_block_slow()
213 if (!buffer_mapped(bh)) in __find_get_block_slow()
215 else if (bh->b_blocknr == block) { in __find_get_block_slow()
216 ret = bh; in __find_get_block_slow()
217 get_bh(bh); in __find_get_block_slow()
220 bh = bh->b_this_page; in __find_get_block_slow()
221 } while (bh != head); in __find_get_block_slow()
234 (unsigned long long)bh->b_blocknr, in __find_get_block_slow()
235 bh->b_state, bh->b_size, bdev, in __find_get_block_slow()
245 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
253 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
255 folio = bh->b_folio; in end_buffer_async_read()
257 set_buffer_uptodate(bh); in end_buffer_async_read()
259 clear_buffer_uptodate(bh); in end_buffer_async_read()
260 buffer_io_error(bh, ", async page read"); in end_buffer_async_read()
271 clear_buffer_async_read(bh); in end_buffer_async_read()
272 unlock_buffer(bh); in end_buffer_async_read()
273 tmp = bh; in end_buffer_async_read()
282 } while (tmp != bh); in end_buffer_async_read()
301 struct buffer_head *bh; member
308 struct buffer_head *bh = ctx->bh; in verify_bh() local
311 valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh)); in verify_bh()
312 end_buffer_async_read(bh, valid); in verify_bh()
316 static bool need_fsverity(struct buffer_head *bh) in need_fsverity() argument
318 struct folio *folio = bh->b_folio; in need_fsverity()
330 struct buffer_head *bh = ctx->bh; in decrypt_bh() local
333 err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size, in decrypt_bh()
334 bh_offset(bh)); in decrypt_bh()
335 if (err == 0 && need_fsverity(bh)) { in decrypt_bh()
345 end_buffer_async_read(bh, err == 0); in decrypt_bh()
353 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) in end_buffer_async_read_io() argument
355 struct inode *inode = bh->b_folio->mapping->host; in end_buffer_async_read_io()
357 bool verify = need_fsverity(bh); in end_buffer_async_read_io()
365 ctx->bh = bh; in end_buffer_async_read_io()
377 end_buffer_async_read(bh, uptodate); in end_buffer_async_read_io()
384 void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
391 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
393 folio = bh->b_folio; in end_buffer_async_write()
395 set_buffer_uptodate(bh); in end_buffer_async_write()
397 buffer_io_error(bh, ", lost async page write"); in end_buffer_async_write()
398 mark_buffer_write_io_error(bh); in end_buffer_async_write()
399 clear_buffer_uptodate(bh); in end_buffer_async_write()
406 clear_buffer_async_write(bh); in end_buffer_async_write()
407 unlock_buffer(bh); in end_buffer_async_write()
408 tmp = bh->b_this_page; in end_buffer_async_write()
409 while (tmp != bh) { in end_buffer_async_write()
447 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
449 bh->b_end_io = end_buffer_async_read_io; in mark_buffer_async_read()
450 set_buffer_async_read(bh); in mark_buffer_async_read()
453 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio() argument
456 bh->b_end_io = handler; in mark_buffer_async_write_endio()
457 set_buffer_async_write(bh); in mark_buffer_async_write_endio()
460 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
462 mark_buffer_async_write_endio(bh, end_buffer_async_write); in mark_buffer_async_write()
519 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
521 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
522 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
523 bh->b_assoc_map = NULL; in __remove_assoc_queue()
543 struct buffer_head *bh; in osync_buffers_list() local
550 bh = BH_ENTRY(p); in osync_buffers_list()
551 if (buffer_locked(bh)) { in osync_buffers_list()
552 get_bh(bh); in osync_buffers_list()
554 wait_on_buffer(bh); in osync_buffers_list()
555 if (!buffer_uptodate(bh)) in osync_buffers_list()
557 brelse(bh); in osync_buffers_list()
668 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block() local
669 if (bh) { in write_boundary_block()
670 if (buffer_dirty(bh)) in write_boundary_block()
671 write_dirty_buffer(bh, 0); in write_boundary_block()
672 put_bh(bh); in write_boundary_block()
676 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
679 struct address_space *buffer_mapping = bh->b_folio->mapping; in mark_buffer_dirty_inode()
681 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
687 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
689 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
691 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
730 struct buffer_head *bh = head; in block_dirty_folio() local
733 set_buffer_dirty(bh); in block_dirty_folio()
734 bh = bh->b_this_page; in block_dirty_folio()
735 } while (bh != head); in block_dirty_folio()
778 struct buffer_head *bh; in fsync_buffers_list() local
789 bh = BH_ENTRY(list->next); in fsync_buffers_list()
790 mapping = bh->b_assoc_map; in fsync_buffers_list()
791 __remove_assoc_queue(bh); in fsync_buffers_list()
795 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
796 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
797 bh->b_assoc_map = mapping; in fsync_buffers_list()
798 if (buffer_dirty(bh)) { in fsync_buffers_list()
799 get_bh(bh); in fsync_buffers_list()
808 write_dirty_buffer(bh, REQ_SYNC); in fsync_buffers_list()
816 brelse(bh); in fsync_buffers_list()
827 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
828 get_bh(bh); in fsync_buffers_list()
829 mapping = bh->b_assoc_map; in fsync_buffers_list()
830 __remove_assoc_queue(bh); in fsync_buffers_list()
834 if (buffer_dirty(bh)) { in fsync_buffers_list()
835 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
837 bh->b_assoc_map = mapping; in fsync_buffers_list()
840 wait_on_buffer(bh); in fsync_buffers_list()
841 if (!buffer_uptodate(bh)) in fsync_buffers_list()
843 brelse(bh); in fsync_buffers_list()
896 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
897 if (buffer_dirty(bh)) { in remove_inode_buffers()
901 __remove_assoc_queue(bh); in remove_inode_buffers()
920 struct buffer_head *bh, *head; in folio_alloc_buffers() local
935 bh = alloc_buffer_head(gfp); in folio_alloc_buffers()
936 if (!bh) in folio_alloc_buffers()
939 bh->b_this_page = head; in folio_alloc_buffers()
940 bh->b_blocknr = -1; in folio_alloc_buffers()
941 head = bh; in folio_alloc_buffers()
943 bh->b_size = size; in folio_alloc_buffers()
946 folio_set_bh(bh, folio, offset); in folio_alloc_buffers()
957 bh = head; in folio_alloc_buffers()
959 free_buffer_head(bh); in folio_alloc_buffers()
977 struct buffer_head *bh, *tail; in link_dev_buffers() local
979 bh = head; in link_dev_buffers()
981 tail = bh; in link_dev_buffers()
982 bh = bh->b_this_page; in link_dev_buffers()
983 } while (bh); in link_dev_buffers()
1007 struct buffer_head *bh = head; in folio_init_buffers() local
1012 if (!buffer_mapped(bh)) { in folio_init_buffers()
1013 bh->b_end_io = NULL; in folio_init_buffers()
1014 bh->b_private = NULL; in folio_init_buffers()
1015 bh->b_bdev = bdev; in folio_init_buffers()
1016 bh->b_blocknr = block; in folio_init_buffers()
1018 set_buffer_uptodate(bh); in folio_init_buffers()
1020 set_buffer_mapped(bh); in folio_init_buffers()
1023 bh = bh->b_this_page; in folio_init_buffers()
1024 } while (bh != head); in folio_init_buffers()
1043 struct buffer_head *bh; in grow_dev_page() local
1061 bh = folio_buffers(folio); in grow_dev_page()
1062 if (bh) { in grow_dev_page()
1063 if (bh->b_size == size) { in grow_dev_page()
1072 bh = folio_alloc_buffers(folio, size, true); in grow_dev_page()
1080 link_dev_buffers(folio, bh); in grow_dev_page()
1138 struct buffer_head *bh; in __getblk_slow() local
1141 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1142 if (bh) in __getblk_slow()
1143 return bh; in __getblk_slow()
1186 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1188 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1190 trace_block_dirty_buffer(bh); in mark_buffer_dirty()
1198 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1200 if (buffer_dirty(bh)) in mark_buffer_dirty()
1204 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1205 struct folio *folio = bh->b_folio; in mark_buffer_dirty()
1221 void mark_buffer_write_io_error(struct buffer_head *bh) in mark_buffer_write_io_error() argument
1223 set_buffer_write_io_error(bh); in mark_buffer_write_io_error()
1225 if (bh->b_folio && bh->b_folio->mapping) in mark_buffer_write_io_error()
1226 mapping_set_error(bh->b_folio->mapping, -EIO); in mark_buffer_write_io_error()
1227 if (bh->b_assoc_map) { in mark_buffer_write_io_error()
1228 mapping_set_error(bh->b_assoc_map, -EIO); in mark_buffer_write_io_error()
1229 errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO); in mark_buffer_write_io_error()
1255 void __bforget(struct buffer_head *bh) in __bforget() argument
1257 clear_buffer_dirty(bh); in __bforget()
1258 if (bh->b_assoc_map) { in __bforget()
1259 struct address_space *buffer_mapping = bh->b_folio->mapping; in __bforget()
1262 list_del_init(&bh->b_assoc_buffers); in __bforget()
1263 bh->b_assoc_map = NULL; in __bforget()
1266 __brelse(bh); in __bforget()
1270 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1272 lock_buffer(bh); in __bread_slow()
1273 if (buffer_uptodate(bh)) { in __bread_slow()
1274 unlock_buffer(bh); in __bread_slow()
1275 return bh; in __bread_slow()
1277 get_bh(bh); in __bread_slow()
1278 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1279 submit_bh(REQ_OP_READ, bh); in __bread_slow()
1280 wait_on_buffer(bh); in __bread_slow()
1281 if (buffer_uptodate(bh)) in __bread_slow()
1282 return bh; in __bread_slow()
1284 brelse(bh); in __bread_slow()
1330 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1332 struct buffer_head *evictee = bh; in bh_lru_install()
1353 if (evictee == bh) { in bh_lru_install()
1359 get_bh(bh); in bh_lru_install()
1380 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru() local
1382 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1383 bh->b_size == size) { in lookup_bh_lru()
1390 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1392 get_bh(bh); in lookup_bh_lru()
1393 ret = bh; in lookup_bh_lru()
1409 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block() local
1411 if (bh == NULL) { in __find_get_block()
1413 bh = __find_get_block_slow(bdev, block); in __find_get_block()
1414 if (bh) in __find_get_block()
1415 bh_lru_install(bh); in __find_get_block()
1417 touch_buffer(bh); in __find_get_block()
1419 return bh; in __find_get_block()
1435 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk_gfp() local
1438 if (bh == NULL) in __getblk_gfp()
1439 bh = __getblk_slow(bdev, block, size, gfp); in __getblk_gfp()
1440 return bh; in __getblk_gfp()
1449 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead() local
1450 if (likely(bh)) { in __breadahead()
1451 bh_readahead(bh, REQ_RAHEAD); in __breadahead()
1452 brelse(bh); in __breadahead()
1473 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __bread_gfp() local
1475 if (likely(bh) && !buffer_uptodate(bh)) in __bread_gfp()
1476 bh = __bread_slow(bh); in __bread_gfp()
1477 return bh; in __bread_gfp()
1536 void folio_set_bh(struct buffer_head *bh, struct folio *folio, in folio_set_bh() argument
1539 bh->b_folio = folio; in folio_set_bh()
1545 bh->b_data = (char *)(0 + offset); in folio_set_bh()
1547 bh->b_data = folio_address(folio) + offset; in folio_set_bh()
1560 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1564 lock_buffer(bh); in discard_buffer()
1565 clear_buffer_dirty(bh); in discard_buffer()
1566 bh->b_bdev = NULL; in discard_buffer()
1567 b_state = READ_ONCE(bh->b_state); in discard_buffer()
1569 } while (!try_cmpxchg(&bh->b_state, &b_state, in discard_buffer()
1571 unlock_buffer(bh); in discard_buffer()
1591 struct buffer_head *head, *bh, *next; in block_invalidate_folio() local
1606 bh = head; in block_invalidate_folio()
1608 size_t next_off = curr_off + bh->b_size; in block_invalidate_folio()
1609 next = bh->b_this_page; in block_invalidate_folio()
1621 discard_buffer(bh); in block_invalidate_folio()
1623 bh = next; in block_invalidate_folio()
1624 } while (bh != head); in block_invalidate_folio()
1646 struct buffer_head *bh, *head, *tail; in folio_create_empty_buffers() local
1649 bh = head; in folio_create_empty_buffers()
1651 bh->b_state |= b_state; in folio_create_empty_buffers()
1652 tail = bh; in folio_create_empty_buffers()
1653 bh = bh->b_this_page; in folio_create_empty_buffers()
1654 } while (bh); in folio_create_empty_buffers()
1659 bh = head; in folio_create_empty_buffers()
1662 set_buffer_dirty(bh); in folio_create_empty_buffers()
1664 set_buffer_uptodate(bh); in folio_create_empty_buffers()
1665 bh = bh->b_this_page; in folio_create_empty_buffers()
1666 } while (bh != head); in folio_create_empty_buffers()
1708 struct buffer_head *bh; in clean_bdev_aliases() local
1730 bh = head; in clean_bdev_aliases()
1732 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) in clean_bdev_aliases()
1734 if (bh->b_blocknr >= block + len) in clean_bdev_aliases()
1736 clear_buffer_dirty(bh); in clean_bdev_aliases()
1737 wait_on_buffer(bh); in clean_bdev_aliases()
1738 clear_buffer_req(bh); in clean_bdev_aliases()
1740 bh = bh->b_this_page; in clean_bdev_aliases()
1741 } while (bh != head); in clean_bdev_aliases()
1816 struct buffer_head *bh, *head; in __block_write_full_folio() local
1834 bh = head; in __block_write_full_folio()
1835 blocksize = bh->b_size; in __block_write_full_folio()
1855 clear_buffer_dirty(bh); in __block_write_full_folio()
1856 set_buffer_uptodate(bh); in __block_write_full_folio()
1857 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_folio()
1858 buffer_dirty(bh)) { in __block_write_full_folio()
1859 WARN_ON(bh->b_size != blocksize); in __block_write_full_folio()
1860 err = get_block(inode, block, bh, 1); in __block_write_full_folio()
1863 clear_buffer_delay(bh); in __block_write_full_folio()
1864 if (buffer_new(bh)) { in __block_write_full_folio()
1866 clear_buffer_new(bh); in __block_write_full_folio()
1867 clean_bdev_bh_alias(bh); in __block_write_full_folio()
1870 bh = bh->b_this_page; in __block_write_full_folio()
1872 } while (bh != head); in __block_write_full_folio()
1875 if (!buffer_mapped(bh)) in __block_write_full_folio()
1885 lock_buffer(bh); in __block_write_full_folio()
1886 } else if (!trylock_buffer(bh)) { in __block_write_full_folio()
1890 if (test_clear_buffer_dirty(bh)) { in __block_write_full_folio()
1891 mark_buffer_async_write_endio(bh, handler); in __block_write_full_folio()
1893 unlock_buffer(bh); in __block_write_full_folio()
1895 } while ((bh = bh->b_this_page) != head); in __block_write_full_folio()
1905 struct buffer_head *next = bh->b_this_page; in __block_write_full_folio()
1906 if (buffer_async_write(bh)) { in __block_write_full_folio()
1907 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc); in __block_write_full_folio()
1910 bh = next; in __block_write_full_folio()
1911 } while (bh != head); in __block_write_full_folio()
1938 bh = head; in __block_write_full_folio()
1941 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_folio()
1942 !buffer_delay(bh)) { in __block_write_full_folio()
1943 lock_buffer(bh); in __block_write_full_folio()
1944 mark_buffer_async_write_endio(bh, handler); in __block_write_full_folio()
1950 clear_buffer_dirty(bh); in __block_write_full_folio()
1952 } while ((bh = bh->b_this_page) != head); in __block_write_full_folio()
1958 struct buffer_head *next = bh->b_this_page; in __block_write_full_folio()
1959 if (buffer_async_write(bh)) { in __block_write_full_folio()
1960 clear_buffer_dirty(bh); in __block_write_full_folio()
1961 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc); in __block_write_full_folio()
1964 bh = next; in __block_write_full_folio()
1965 } while (bh != head); in __block_write_full_folio()
1979 struct buffer_head *head, *bh; in folio_zero_new_buffers() local
1986 bh = head; in folio_zero_new_buffers()
1989 block_end = block_start + bh->b_size; in folio_zero_new_buffers()
1991 if (buffer_new(bh)) { in folio_zero_new_buffers()
2000 set_buffer_uptodate(bh); in folio_zero_new_buffers()
2003 clear_buffer_new(bh); in folio_zero_new_buffers()
2004 mark_buffer_dirty(bh); in folio_zero_new_buffers()
2009 bh = bh->b_this_page; in folio_zero_new_buffers()
2010 } while (bh != head); in folio_zero_new_buffers()
2015 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, in iomap_to_bh() argument
2020 bh->b_bdev = iomap->bdev; in iomap_to_bh()
2038 if (!buffer_uptodate(bh) || in iomap_to_bh()
2040 set_buffer_new(bh); in iomap_to_bh()
2043 if (!buffer_uptodate(bh) || in iomap_to_bh()
2045 set_buffer_new(bh); in iomap_to_bh()
2046 set_buffer_uptodate(bh); in iomap_to_bh()
2047 set_buffer_mapped(bh); in iomap_to_bh()
2048 set_buffer_delay(bh); in iomap_to_bh()
2056 set_buffer_new(bh); in iomap_to_bh()
2057 set_buffer_unwritten(bh); in iomap_to_bh()
2070 set_buffer_new(bh); in iomap_to_bh()
2072 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> in iomap_to_bh()
2074 set_buffer_mapped(bh); in iomap_to_bh()
2092 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin_int() local
2105 for(bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin_int()
2106 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin_int()
2110 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2111 set_buffer_uptodate(bh); in __block_write_begin_int()
2115 if (buffer_new(bh)) in __block_write_begin_int()
2116 clear_buffer_new(bh); in __block_write_begin_int()
2117 if (!buffer_mapped(bh)) { in __block_write_begin_int()
2118 WARN_ON(bh->b_size != blocksize); in __block_write_begin_int()
2120 err = get_block(inode, block, bh, 1); in __block_write_begin_int()
2122 err = iomap_to_bh(inode, block, bh, iomap); in __block_write_begin_int()
2126 if (buffer_new(bh)) { in __block_write_begin_int()
2127 clean_bdev_bh_alias(bh); in __block_write_begin_int()
2129 clear_buffer_new(bh); in __block_write_begin_int()
2130 set_buffer_uptodate(bh); in __block_write_begin_int()
2131 mark_buffer_dirty(bh); in __block_write_begin_int()
2142 if (!buffer_uptodate(bh)) in __block_write_begin_int()
2143 set_buffer_uptodate(bh); in __block_write_begin_int()
2146 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_write_begin_int()
2147 !buffer_unwritten(bh) && in __block_write_begin_int()
2149 bh_read_nowait(bh, 0); in __block_write_begin_int()
2150 *wait_bh++=bh; in __block_write_begin_int()
2179 struct buffer_head *bh, *head; in __block_commit_write() local
2181 bh = head = folio_buffers(folio); in __block_commit_write()
2182 blocksize = bh->b_size; in __block_commit_write()
2188 if (!buffer_uptodate(bh)) in __block_commit_write()
2191 set_buffer_uptodate(bh); in __block_commit_write()
2192 mark_buffer_dirty(bh); in __block_commit_write()
2194 if (buffer_new(bh)) in __block_commit_write()
2195 clear_buffer_new(bh); in __block_commit_write()
2198 bh = bh->b_this_page; in __block_commit_write()
2199 } while (bh != head); in __block_commit_write()
2324 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2336 bh = head; in block_is_partially_uptodate()
2341 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2349 bh = bh->b_this_page; in block_is_partially_uptodate()
2350 } while (bh != head); in block_is_partially_uptodate()
2367 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_folio() local
2386 bh = head; in block_read_full_folio()
2391 if (buffer_uptodate(bh)) in block_read_full_folio()
2394 if (!buffer_mapped(bh)) { in block_read_full_folio()
2399 WARN_ON(bh->b_size != blocksize); in block_read_full_folio()
2400 err = get_block(inode, iblock, bh, 0); in block_read_full_folio()
2406 if (!buffer_mapped(bh)) { in block_read_full_folio()
2410 set_buffer_uptodate(bh); in block_read_full_folio()
2417 if (buffer_uptodate(bh)) in block_read_full_folio()
2420 arr[nr++] = bh; in block_read_full_folio()
2421 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_folio()
2439 bh = arr[i]; in block_read_full_folio()
2440 lock_buffer(bh); in block_read_full_folio()
2441 mark_buffer_async_read(bh); in block_read_full_folio()
2450 bh = arr[i]; in block_read_full_folio()
2451 if (buffer_uptodate(bh)) in block_read_full_folio()
2452 end_buffer_async_read(bh, 1); in block_read_full_folio()
2454 submit_bh(REQ_OP_READ, bh); in block_read_full_folio()
2661 struct buffer_head *bh; in block_truncate_page() local
2678 bh = folio_buffers(folio); in block_truncate_page()
2679 if (!bh) { in block_truncate_page()
2681 bh = folio_buffers(folio); in block_truncate_page()
2688 bh = bh->b_this_page; in block_truncate_page()
2693 if (!buffer_mapped(bh)) { in block_truncate_page()
2694 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2695 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2699 if (!buffer_mapped(bh)) in block_truncate_page()
2705 set_buffer_uptodate(bh); in block_truncate_page()
2707 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2708 err = bh_read(bh, 0); in block_truncate_page()
2715 mark_buffer_dirty(bh); in block_truncate_page()
2775 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
2778 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
2780 bh->b_end_io(bh, !bio->bi_status); in end_bio_bh_io_sync()
2784 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, in submit_bh_wbc() argument
2790 BUG_ON(!buffer_locked(bh)); in submit_bh_wbc()
2791 BUG_ON(!buffer_mapped(bh)); in submit_bh_wbc()
2792 BUG_ON(!bh->b_end_io); in submit_bh_wbc()
2793 BUG_ON(buffer_delay(bh)); in submit_bh_wbc()
2794 BUG_ON(buffer_unwritten(bh)); in submit_bh_wbc()
2799 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) in submit_bh_wbc()
2800 clear_buffer_write_io_error(bh); in submit_bh_wbc()
2802 if (buffer_meta(bh)) in submit_bh_wbc()
2804 if (buffer_prio(bh)) in submit_bh_wbc()
2807 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); in submit_bh_wbc()
2809 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); in submit_bh_wbc()
2811 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
2813 __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in submit_bh_wbc()
2816 bio->bi_private = bh; in submit_bh_wbc()
2823 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); in submit_bh_wbc()
2829 void submit_bh(blk_opf_t opf, struct buffer_head *bh) in submit_bh() argument
2831 submit_bh_wbc(opf, bh, NULL); in submit_bh()
2835 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) in write_dirty_buffer() argument
2837 lock_buffer(bh); in write_dirty_buffer()
2838 if (!test_clear_buffer_dirty(bh)) { in write_dirty_buffer()
2839 unlock_buffer(bh); in write_dirty_buffer()
2842 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
2843 get_bh(bh); in write_dirty_buffer()
2844 submit_bh(REQ_OP_WRITE | op_flags, bh); in write_dirty_buffer()
2853 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) in __sync_dirty_buffer() argument
2855 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
2856 lock_buffer(bh); in __sync_dirty_buffer()
2857 if (test_clear_buffer_dirty(bh)) { in __sync_dirty_buffer()
2862 if (!buffer_mapped(bh)) { in __sync_dirty_buffer()
2863 unlock_buffer(bh); in __sync_dirty_buffer()
2867 get_bh(bh); in __sync_dirty_buffer()
2868 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
2869 submit_bh(REQ_OP_WRITE | op_flags, bh); in __sync_dirty_buffer()
2870 wait_on_buffer(bh); in __sync_dirty_buffer()
2871 if (!buffer_uptodate(bh)) in __sync_dirty_buffer()
2874 unlock_buffer(bh); in __sync_dirty_buffer()
2880 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
2882 return __sync_dirty_buffer(bh, REQ_SYNC); in sync_dirty_buffer()
2906 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
2908 return atomic_read(&bh->b_count) | in buffer_busy()
2909 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
2916 struct buffer_head *bh; in drop_buffers() local
2918 bh = head; in drop_buffers()
2920 if (buffer_busy(bh)) in drop_buffers()
2922 bh = bh->b_this_page; in drop_buffers()
2923 } while (bh != head); in drop_buffers()
2926 struct buffer_head *next = bh->b_this_page; in drop_buffers()
2928 if (bh->b_assoc_map) in drop_buffers()
2929 __remove_assoc_queue(bh); in drop_buffers()
2930 bh = next; in drop_buffers()
2931 } while (bh != head); in drop_buffers()
2976 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
2979 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
2980 free_buffer_head(bh); in try_to_free_buffers()
2981 bh = next; in try_to_free_buffers()
2982 } while (bh != buffers_to_free); in try_to_free_buffers()
3036 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3038 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3039 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3068 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3070 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3071 lock_buffer(bh); in bh_uptodate_or_lock()
3072 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3074 unlock_buffer(bh); in bh_uptodate_or_lock()
3088 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait) in __bh_read() argument
3092 BUG_ON(!buffer_locked(bh)); in __bh_read()
3094 get_bh(bh); in __bh_read()
3095 bh->b_end_io = end_buffer_read_sync; in __bh_read()
3096 submit_bh(REQ_OP_READ | op_flags, bh); in __bh_read()
3098 wait_on_buffer(bh); in __bh_read()
3099 if (!buffer_uptodate(bh)) in __bh_read()
3122 struct buffer_head *bh = bhs[i]; in __bh_read_batch() local
3124 if (buffer_uptodate(bh)) in __bh_read_batch()
3128 lock_buffer(bh); in __bh_read_batch()
3130 if (!trylock_buffer(bh)) in __bh_read_batch()
3133 if (buffer_uptodate(bh)) { in __bh_read_batch()
3134 unlock_buffer(bh); in __bh_read_batch()
3138 bh->b_end_io = end_buffer_read_sync; in __bh_read_batch()
3139 get_bh(bh); in __bh_read_batch()
3140 submit_bh(REQ_OP_READ | op_flags, bh); in __bh_read_batch()
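
The helpers matched above make up the classic buffer-head read-modify-write cycle. Below is a minimal sketch of how a filesystem typically strings them together; it is not taken from buffer.c, the function example_patch_block and its parameters are hypothetical, and error handling is reduced to the essentials.

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/string.h>

static int example_patch_block(struct super_block *sb, sector_t blocknr,
			       const void *data, size_t len, size_t off)
{
	struct buffer_head *bh;
	int ret;

	/* sb_bread() bottoms out in __bread_gfp()/__bread_slow() above */
	bh = sb_bread(sb, blocknr);
	if (!bh)
		return -EIO;

	if (WARN_ON(off + len > bh->b_size)) {
		brelse(bh);
		return -EINVAL;
	}

	lock_buffer(bh);		/* serialise against in-flight I/O; __lock_buffer() if contended */
	memcpy(bh->b_data + off, data, len);
	unlock_buffer(bh);		/* wakes __wait_on_buffer() sleepers */

	mark_buffer_dirty(bh);		/* mark for later writeback ... */
	ret = sync_dirty_buffer(bh);	/* ... or push it out now (REQ_SYNC); write_dirty_buffer() is the async variant */

	brelse(bh);			/* drop the reference taken by sb_bread() */
	return ret;
}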