// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

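/**
 * gfs2_page_add_databufs - Add the page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page being journaled
 * @from: Offset of the first byte of the range within the page
 * @len: Length of the byte range
 *
 * Marks each buffer head overlapping [@from, @from + @len) uptodate and
 * adds it to the current transaction as journaled data.
 */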
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index (updated to record how far writeback got)
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

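	/*
	 * Reserve journal space for every data block this pagevec may touch
	 * before any page locks are taken.
	 */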
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 *
 * Returns: errno
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}
/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data read, or errno on failure
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

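	/* Stuffed files are read in full by gfs2_read_folio(); no readahead. */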
	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

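/**
 * jdata_dirty_folio - Mark a jdata folio dirty
 * @mapping: The address space
 * @folio: The folio being dirtied
 *
 * When a folio is dirtied from within a transaction (current->journal_info
 * is set), it is marked as checked so that __gfs2_jdata_writepage() will
 * add its buffers to the transaction before writing it back.
 */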
static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

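/**
 * gfs2_discard - Remove a buffer from the journal and clear its state
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Takes the buffer off its transaction or AIL list (as appropriate) and
 * clears its dirty, mapped, req and new bits.
 */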
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

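/**
 * gfs2_invalidate_folio - Invalidate part or all of a jdata folio
 * @folio: The folio
 * @offset: Start of the byte range being invalidated
 * @length: Length of the byte range
 *
 * Discards the journaled buffers that lie entirely within the range and,
 * if the whole folio is being invalidated, releases the folio as well.
 */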
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was released, false otherwise
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
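	/* Pass 1: give up if any buffer is busy, dirty, or in a transaction. */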
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

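	/*
	 * Pass 2: detach each buffer's gfs2_bufdata and free it, unless it
	 * is still queued as a revoke.
	 */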
	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

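/*
 * Address space operations for regular files; used for both the ordered
 * and writeback data modes.
 */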
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

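/* Address space operations for journaled-data (jdata) files. */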
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

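/**
 * gfs2_set_aops - Set the address space operations for an inode
 * @inode: The inode
 *
 * Selects the jdata operations when the inode has the jdata flag set,
 * and the regular operations otherwise.
 */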
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}