/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};
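
/*
 * Filesystems may claim bits from BH_PrivateStart upward for their own
 * use. A hedged sketch of the usual pattern (the names below are
 * illustrative, not defined by this header; jbd2 does something similar
 * with BH_JBD = BH_PrivateStart):
 *
 *	enum my_state_bits {
 *		BH_MyMeta = BH_PrivateStart,
 *		BH_MyRevoked,
 *	};
 *	BUFFER_FNS(MyMeta, my_meta)
 *
 * which emits set_buffer_my_meta(), clear_buffer_my_meta() and
 * buffer_my_meta() via the BUFFER_FNS() macro defined below.
 */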

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers. Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map; /* mapping this buffer is
					      associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};
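
/*
 * The buffers of a page form a singly linked ring through b_this_page.
 * A minimal sketch of the canonical traversal, assuming the caller
 * already holds the page and its buffers stable:
 *
 *	struct buffer_head *bh, *head;
 *
 *	bh = head = page_buffers(page);
 *	do {
 *		// operate on one block's worth of state here
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */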

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * Check the flag first to avoid re-setting a buffer flag that is already
 * set: the redundant write would cause a costly cache line transition.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
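
/*
 * For reference, BUFFER_FNS(Dirty, dirty) expands to:
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Dirty, &(bh)->b_state))
 *			set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *
 * plus the matching clear_buffer_dirty() and buffer_dirty() helpers.
 */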

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions. Note that there are also functions
 * of the form "mark_buffer_foo()". These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * make it consistent with folio_mark_uptodate
	 * pairs with smp_load_acquire in buffer_uptodate
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
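
/*
 * The barrier pairing above mirrors folio_mark_uptodate() /
 * folio_test_uptodate(). An illustrative sketch, not a complete I/O
 * path: the writer publishes the data before the bit,
 *
 *	memcpy(bh->b_data, src, bh->b_size);
 *	set_buffer_uptodate(bh);	// smp_mb__before_atomic + set_bit
 *
 * so a reader that observes buffer_uptodate() returning 1 (an acquire
 * load) is guaranteed to also see the data written before the bit.
 */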

#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)		folio_get_private(folio)
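
/*
 * A hedged sketch of the usual guard before dereferencing the private
 * data; blocksize stands in for whatever block size the caller's
 * filesystem uses:
 *
 *	struct buffer_head *head;
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, blocksize, 0);
 *	head = page_buffers(page);
 *
 * folio_buffers() differs in that it simply returns NULL when the folio
 * has no buffers, so callers can test the result directly.
 */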

void buffer_check_dirty_writeback(struct folio *folio,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
bool try_to_free_buffers(struct folio *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
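
/*
 * A typical ->page_mkwrite() implementation funnels its result through
 * this helper. A minimal sketch, where my_get_block is a placeholder
 * for the filesystem's get_block_t:
 *
 *	static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		int err = block_page_mkwrite(vmf->vma, vmf, my_get_block);
 *
 *		return block_page_mkwrite_return(err);
 *	}
 */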
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
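
/*
 * A minimal sketch of the classic metadata read pattern (the block
 * number and error handling are illustrative):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	// bh->b_data now holds sb->s_blocksize bytes of valid data
 *	brelse(bh);
 */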

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}


static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
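
/*
 * map_bh() is the usual tail of a get_block_t implementation. A hedged
 * sketch, where my_get_block and my_lookup are hypothetical names for a
 * filesystem's block-mapping routine and its internal lookup:
 *
 *	static int my_get_block(struct inode *inode, sector_t iblock,
 *				struct buffer_head *bh, int create)
 *	{
 *		sector_t phys = my_lookup(inode, iblock); // hypothetical
 *
 *		map_bh(bh, inode->i_sb, phys);
 *		return 0;
 *	}
 *
 * A freshly allocated block would additionally set_buffer_new(bh) so
 * callers know to zero any unwritten portions.
 */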

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
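
/*
 * The buffer lock serialises modification against I/O. A sketch of the
 * usual update pattern (off/len are illustrative):
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + off, src, len);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 */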

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}
/* Returns 1 if the buffer was already uptodate, 0 on successful read, and -EIO on error. */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}
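
/*
 * A hedged usage sketch: callers typically only care about failure,
 * since both "already uptodate" (1) and "read completed" (0) mean the
 * data is valid:
 *
 *	if (bh_read(bh, 0) < 0)
 *		return -EIO;
 */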

static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block and returns the buffer head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated. Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */