// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);

/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		state_lock;

	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) have the uptodate status.
	 * Bits [blocks_per_folio..2*blocks_per_folio) have the dirty status.
	 */
	unsigned long		state[];
};

static struct bio_set iomap_ioend_bioset;

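/* The folio is fully uptodate once every bit in the first bitmap half is set. */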
static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}

static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
		unsigned int block)
{
	return test_bit(block, ifs->state);
}

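/*
 * Mark the blocks covered by [off, off + len) uptodate in the bitmap, and
 * mark the whole folio uptodate once every block in it is.
 */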
static void ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk, nr_blks);
	if (ifs_is_fully_uptodate(folio, ifs))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_uptodate(folio, ifs, off, len);
	else
		folio_mark_uptodate(folio);
}

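/*
 * Dirty bits live in the second half of ifs->state, offset from the uptodate
 * bits by the number of blocks in the folio.
 */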
static inline bool ifs_block_is_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, int block)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);

	return test_bit(block + blks_per_folio, ifs->state);
}

static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}

static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}

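/*
 * Attach per-block state to a folio that spans more than one block, seeding
 * the uptodate and dirty bitmaps from the folio flags.  Returns NULL if a
 * NOWAIT allocation fails or if no per-block tracking is needed.
 */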
static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc(struct_size(ifs, state,
		      BITS_TO_LONGS(2 * nr_blocks)), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}

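/*
 * Detach and free the per-folio state, warning if reads or writes are still
 * pending or the uptodate bitmap disagrees with the folio flag.
 */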
static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!ifs_block_is_uptodate(ifs, i))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (ifs_block_is_uptodate(ifs, i)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

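/*
 * Read completion for a folio range: mark it uptodate (or flag the folio in
 * error) and unlock the folio once no reads remain pending against it.
 */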
static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, offset, len);
	}

	if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

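/*
 * Handle one read mapping: copy inline data, zero holes and post-EOF ranges
 * directly in the page cache, and batch mapped ranges into the context bio,
 * submitting and reallocating it whenever the range is discontiguous.
 */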
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_folio_state *ifs;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (ifs)
		atomic_add(plen, &ifs->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);

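/*
 * A filesystem typically wires these read helpers into its address_space
 * operations; a minimal sketch, with all "example_*" names hypothetical:
 *
 *	static int example_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return iomap_read_folio(folio, &example_iomap_ops);
 *	}
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &example_iomap_ops);
 *	}
 *
 *	static const struct address_space_operations example_aops = {
 *		.read_folio		= example_read_folio,
 *		.readahead		= example_readahead,
 *		.release_folio		= iomap_release_folio,
 *		.invalidate_folio	= iomap_invalidate_folio,
 *	};
 */
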
static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!ifs_block_is_uptodate(ifs, i))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
				pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio_nofail(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

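/*
 * Prepare the blocks touched by a write: any block that the write does not
 * completely overwrite and that is not already uptodate must be zeroed (for
 * holes and newly allocated extents) or read in from the srcmap first.
 */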
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_folio_state *ifs;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * the entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_pos(folio) + folio_size(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	else
		return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							  &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);
	iomap_write_failed(iter->inode, pos, len);

	return status;
}

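/*
 * Commit a copied range: mark it uptodate and dirty in the per-block bitmaps
 * and dirty the folio, rejecting short copies into non-uptodate folios.
 */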
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	__iomap_put_folio(iter, pos, ret, folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (ret < len)
		iomap_write_failed(iter->inode, pos + ret, len - ret);
	return ret;
}

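/*
 * Copy user data into the page cache one folio-sized chunk at a time,
 * halving the chunk size and retrying when an atomic copy comes up short.
 */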
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */

		bytes = iov_iter_count(i);
retry:
		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			pos += status;
			written += status;
			length -= status;
		}
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {
		iov_iter_revert(i, written);
		return -EAGAIN;
	}
	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

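/*
 * A filesystem's ->write_iter typically performs the generic checks under the
 * inode lock and then calls the helper above; a minimal sketch, with the
 * "example_*" names hypothetical:
 *
 *	static ssize_t example_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&example_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */
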
static int iomap_write_delalloc_ifs_punch(struct inode *inode,
		struct folio *folio, loff_t start_byte, loff_t end_byte,
		iomap_punch_t punch)
{
	unsigned int first_blk, last_blk, i;
	loff_t last_byte;
	u8 blkbits = inode->i_blkbits;
	struct iomap_folio_state *ifs;
	int ret = 0;

	/*
	 * When we have per-block dirty tracking, there can be
	 * blocks within a folio which are marked uptodate
	 * but not dirty. In that case it is necessary to punch
	 * out such blocks to avoid leaking any delalloc blocks.
	 */
	ifs = folio->private;
	if (!ifs)
		return ret;

	last_byte = min_t(loff_t, end_byte - 1,
			folio_pos(folio) + folio_size(folio) - 1);
	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
	for (i = first_blk; i <= last_blk; i++) {
		if (!ifs_block_is_dirty(folio, ifs, i)) {
			ret = punch(inode, folio_pos(folio) + (i << blkbits),
				    1 << blkbits);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		iomap_punch_t punch)
{
	int ret = 0;

	if (!folio_test_dirty(folio))
		return ret;

	/* if dirty, punch up to offset */
	if (start_byte > *punch_start_byte) {
		ret = punch(inode, *punch_start_byte,
				start_byte - *punch_start_byte);
		if (ret)
			return ret;
	}

	/* Punch non-dirty blocks within folio */
	ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
			end_byte, punch);
	if (ret)
		return ret;

	/*
	 * Make sure the next punch start is correctly bound to
	 * the end of this data range, not the end of the folio.
	 */
	*punch_start_byte = min_t(loff_t, end_byte,
			folio_pos(folio) + folio_size(folio));

	return ret;
}

/*
 * Scan the data range passed to us for dirty page cache folios. If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start from.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they have been instantiated by read faults,
 * in which case they contain zeroes and we can remove the delalloc backing
 * range, and any new writes to those pages will do the normal hole filling
 * operation...
 *
 * This makes the logic simple: we only need to keep the delalloc extents over
 * the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static int iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		iomap_punch_t punch)
{
	while (start_byte < end_byte) {
		struct folio *folio;
		int ret;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
				start_byte, end_byte, punch);
		if (ret) {
			folio_unlock(folio);
			folio_put(folio);
			return ret;
		}

		/* move offset to start of next folio in range */
		start_byte = folio_next_index(folio) << PAGE_SHIFT;
		folio_unlock(folio);
		folio_put(folio);
	}
	return 0;
}

/*
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * As we are scanning the page cache for data, we don't need to reinvent the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes. This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date. A write page fault can then mark it dirty. If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out. Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty. This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because
 * this matches the intervals returned by mapping_seek_hole_data(). i.e.
 * SEEK_DATA returns the start of a data range (start_byte), and
 * SEEK_HOLE(start_byte) returns the end of the data range (data_end). Using
 * closed intervals would require sprinkling this code with magic "+ 1" and
 * "- 1" arithmetic and expose the code to subtle off-by-one bugs....
 */
static int iomap_write_delalloc_release(struct inode *inode,
		loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
	int error = 0;

	/*
	 * Lock the mapping to avoid races with page faults re-instantiating
	 * folios and dirtying them via ->page_mkwrite whilst we walk the
	 * cache and perform delalloc extent removal. Failing to do this can
	 * leave dirty pages with no space reservation in the cache.
	 */
	filemap_invalidate_lock(inode->i_mapping);
	while (start_byte < scan_end_byte) {
		loff_t data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
		 */
		if (start_byte == -ENXIO || start_byte == scan_end_byte)
			break;
		if (start_byte < 0) {
			error = start_byte;
			goto out_unlock;
		}
		WARN_ON_ONCE(start_byte < punch_start_byte);
		WARN_ON_ONCE(start_byte > scan_end_byte);

		/*
		 * We find the end of this contiguous cached data range by
		 * seeking from start_byte to the beginning of the next hole.
		 */
		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
				scan_end_byte, SEEK_HOLE);
		if (data_end < 0) {
			error = data_end;
			goto out_unlock;
		}
		WARN_ON_ONCE(data_end <= start_byte);
		WARN_ON_ONCE(data_end > scan_end_byte);

		error = iomap_write_delalloc_scan(inode, &punch_start_byte,
				start_byte, data_end, punch);
		if (error)
			goto out_unlock;

		/* The next data search starts at the end of this one. */
		start_byte = data_end;
	}

	if (punch_start_byte < end_byte)
		error = punch(inode, punch_start_byte,
				end_byte - punch_start_byte);
out_unlock:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

/*
 * When a short write occurs, the filesystem may need to remove reserved space
 * that was allocated in ->iomap_begin from its ->iomap_end method. For
 * filesystems that use delayed allocation, we need to punch out delalloc
 * extents from the range that are not dirty in the page cache. As the write can
 * race with page faults, there can be dirty pages over the delalloc extent
 * outside the range of a short write but still within the delalloc extent
 * allocated for this iomap.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it. It must skip over all other types of extents in the range and leave
 * them completely unchanged. It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 */
int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
		struct iomap *iomap, loff_t pos, loff_t length,
		ssize_t written, iomap_punch_t punch)
{
	loff_t start_byte;
	loff_t end_byte;
	unsigned int blocksize = i_blocksize(inode);

	if (iomap->type != IOMAP_DELALLOC)
		return 0;

	/* If we didn't reserve the blocks, we're not allowed to punch them. */
	if (!(iomap->flags & IOMAP_F_NEW))
		return 0;

	/*
	 * start_byte refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block
	 * in the range.
	 */
	if (unlikely(!written))
		start_byte = round_down(pos, blocksize);
	else
		start_byte = round_up(pos + written, blocksize);
	end_byte = round_up(pos + length, blocksize);

	/* Nothing to do if we've written the entire delalloc extent */
	if (start_byte >= end_byte)
		return 0;

	return iomap_write_delalloc_release(inode, start_byte, end_byte,
					punch);
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);

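/*
 * A delayed-allocation filesystem would typically call the helper above from
 * its ->iomap_end when a buffered write came up short; a minimal sketch, with
 * the "example_*" names hypothetical:
 *
 *	static int example_punch_delalloc(struct inode *inode, loff_t offset,
 *			loff_t length)
 *	{
 *		return example_remove_delalloc_blocks(inode, offset, length);
 *	}
 *
 *	static int example_buffered_write_iomap_end(struct inode *inode,
 *			loff_t pos, loff_t length, ssize_t written,
 *			unsigned flags, struct iomap *iomap)
 *	{
 *		return iomap_file_buffered_write_punch_delalloc(inode, iomap,
 *				pos, length, written, example_punch_delalloc);
 *	}
 */
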
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;
		if (iomap->flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		cond_resched();

		pos += bytes;
		written += bytes;
		length -= bytes;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length > 0);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
	} while (length > 0);

	if (did_zero)
		*did_zero = true;
	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return vmf_fs_error(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

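/*
 * Writeback completion for a folio range: record any error against the
 * mapping and end writeback on the folio once no writes remain pending.
 */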
iomap_finish_folio_write(struct inode * inode,struct folio * folio,size_t len,int error)1453 static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1454 size_t len, int error)
1455 {
1456 struct iomap_folio_state *ifs = folio->private;
1457
1458 if (error) {
1459 folio_set_error(folio);
1460 mapping_set_error(inode->i_mapping, error);
1461 }
1462
1463 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1464 WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1465
1466 if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1467 folio_end_writeback(folio);
1468 }
1469
1470 /*
1471 * We're now finished for good with this ioend structure. Update the page
1472 * state, release holds on bios, and finally free up memory. Do not use the
1473 * ioend after this.
1474 */
1475 static u32
iomap_finish_ioend(struct iomap_ioend * ioend,int error)1476 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1477 {
1478 struct inode *inode = ioend->io_inode;
1479 struct bio *bio = &ioend->io_inline_bio;
1480 struct bio *last = ioend->io_bio, *next;
1481 u64 start = bio->bi_iter.bi_sector;
1482 loff_t offset = ioend->io_offset;
1483 bool quiet = bio_flagged(bio, BIO_QUIET);
1484 u32 folio_count = 0;
1485
1486 for (bio = &ioend->io_inline_bio; bio; bio = next) {
1487 struct folio_iter fi;
1488
1489 /*
1490 * For the last bio, bi_private points to the ioend, so we
1491 * need to explicitly end the iteration here.
1492 */
1493 if (bio == last)
1494 next = NULL;
1495 else
1496 next = bio->bi_private;
1497
1498 /* walk all folios in bio, ending page IO on them */
1499 bio_for_each_folio_all(fi, bio) {
1500 iomap_finish_folio_write(inode, fi.folio, fi.length,
1501 error);
1502 folio_count++;
1503 }
1504 bio_put(bio);
1505 }
1506 /* The ioend has been freed by bio_put() */
1507
1508 if (unlikely(error && !quiet)) {
1509 printk_ratelimited(KERN_ERR
1510 "%s: writeback error on inode %lu, offset %lld, sector %llu",
1511 inode->i_sb->s_id, inode->i_ino, offset, start);
1512 }
1513 return folio_count;
1514 }
1515
1516 /*
1517 * Ioend completion routine for merged bios. This can only be called from task
1518 * contexts as merged ioends can be of unbound length. Hence we have to break up
1519 * the writeback completions into manageable chunks to avoid long scheduler
1520 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
1521 * good batch processing throughput without creating adverse scheduler latency
1522 * conditions.
1523 */
1524 void
iomap_finish_ioends(struct iomap_ioend * ioend,int error)1525 iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1526 {
1527 struct list_head tmp;
1528 u32 completions;
1529
1530 might_sleep();
1531
1532 list_replace_init(&ioend->io_list, &tmp);
1533 completions = iomap_finish_ioend(ioend, error);
1534
1535 while (!list_empty(&tmp)) {
1536 if (completions > IOEND_BATCH_SIZE * 8) {
1537 cond_resched();
1538 completions = 0;
1539 }
1540 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1541 list_del_init(&ioend->io_list);
1542 completions += iomap_finish_ioend(ioend, error);
1543 }
1544 }
1545 EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1546
1547 /*
1548 * We can merge two adjacent ioends if they have the same set of work to do.
1549 */
1550 static bool
iomap_ioend_can_merge(struct iomap_ioend * ioend,struct iomap_ioend * next)1551 iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1552 {
1553 if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1554 return false;
1555 if ((ioend->io_flags & IOMAP_F_SHARED) ^
1556 (next->io_flags & IOMAP_F_SHARED))
1557 return false;
1558 if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1559 (next->io_type == IOMAP_UNWRITTEN))
1560 return false;
1561 if (ioend->io_offset + ioend->io_size != next->io_offset)
1562 return false;
1563 /*
1564 * Do not merge physically discontiguous ioends. The filesystem
1565 * completion functions will have to iterate the physical
1566 * discontiguities even if we merge the ioends at a logical level, so
1567 * we don't gain anything by merging physical discontiguities here.
1568 *
1569 * We cannot use bio->bi_iter.bi_sector here as it is modified during
1570 * submission so does not point to the start sector of the bio at
1571 * completion.
1572 */
1573 if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
1574 return false;
1575 return true;
1576 }
1577
1578 void
iomap_ioend_try_merge(struct iomap_ioend * ioend,struct list_head * more_ioends)1579 iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1580 {
1581 struct iomap_ioend *next;
1582
1583 INIT_LIST_HEAD(&ioend->io_list);
1584
1585 while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1586 io_list))) {
1587 if (!iomap_ioend_can_merge(ioend, next))
1588 break;
1589 list_move_tail(&next->io_list, &ioend->io_list);
1590 ioend->io_size += next->io_size;
1591 }
1592 }
1593 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1594
1595 static int
iomap_ioend_compare(void * priv,const struct list_head * a,const struct list_head * b)1596 iomap_ioend_compare(void *priv, const struct list_head *a,
1597 const struct list_head *b)
1598 {
1599 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1600 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1601
1602 if (ia->io_offset < ib->io_offset)
1603 return -1;
1604 if (ia->io_offset > ib->io_offset)
1605 return 1;
1606 return 0;
1607 }
1608
1609 void
iomap_sort_ioends(struct list_head * ioend_list)1610 iomap_sort_ioends(struct list_head *ioend_list)
1611 {
1612 list_sort(NULL, ioend_list, iomap_ioend_compare);
1613 }
1614 EXPORT_SYMBOL_GPL(iomap_sort_ioends);

static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio instead of
 * submitting it. This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we're failing the IO now, just mark the ioend with an
		 * error and finish it. This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}
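
/*
 * Example: ->prepare_ioend is the hook a filesystem uses to redirect bio
 * completion for ioends that need further work in process context, e.g.
 * unwritten extent conversion.  A minimal sketch, loosely modelled on how
 * XFS uses this hook; the myfs_* names are hypothetical:
 *
 *	static int myfs_prepare_ioend(struct iomap_ioend *ioend, int status)
 *	{
 *		// Send ioends that need metadata updates to a workqueue;
 *		// plain overwrites complete inline via the default end_io.
 *		if (ioend->io_type == IOMAP_UNWRITTEN ||
 *		    (ioend->io_flags & IOMAP_F_SHARED))
 *			ioend->io_bio->bi_end_io = myfs_end_bio;
 *		return status;
 *	}
 *
 * where myfs_end_bio() would queue the ioend for a worker like the
 * myfs_end_io_work() example sketched above.
 */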

static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
			       GFP_NOFS, &iomap_ioend_bioset);
	bio->bi_iter.bi_sector = sector;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_folios = 0;
	ioend->io_offset = offset;
	ioend->io_bio = bio;
	ioend->io_sector = sector;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
	bio_clone_blkg_association(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}
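
/*
 * The resulting linkage: bio_chain() makes each submitted bio's ->bi_private
 * point at the bio allocated after it, so completion can walk forward from
 * the ioend's embedded io_inline_bio to the final io_bio:
 *
 *	io_inline_bio -> ... -> prev -> new (== ioend->io_bio)
 *
 * The bio_get() reference keeps each chained bio alive after submission
 * until iomap_finish_ioend() has walked past it and dropped it.
 */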

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	/*
	 * Limit ioend bio chain lengths to minimise IO completion latency.
	 * This also prevents long tight loops ending page writeback on all
	 * the folios in the ioend.
	 */
	if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
		struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, pos);
	unsigned len = i_blocksize(inode);
	size_t poff = offset_in_folio(folio, pos);

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
	}

	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
		bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
	}

	if (ifs)
		atomic_add(len, &ifs->write_bytes_pending);
	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, &folio->page, len);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide. The current ioend we're
 * adding blocks to is cached in the writepage context, and if the new block
 * doesn't append to the cached ioend, it will create a new ioend and cache
 * that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire folio is processed or an error has
 * been detected. While ioends are submitted immediately after they are fully
 * built, batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct folio *folio, u64 end_pos)
{
	struct iomap_folio_state *ifs = folio->private;
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	unsigned nblocks = i_blocks_per_folio(inode, folio);
	u64 pos = folio_pos(folio);
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(end_pos <= pos);

	if (!ifs && nblocks > 1) {
		ifs = ifs_alloc(inode, folio, 0);
		iomap_set_range_dirty(folio, 0, end_pos - pos);
	}

	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0);

	/*
	 * Walk through the folio to find areas to write back. If we
	 * run off the end of the current map or find the current map
	 * invalid, grab a new one.
	 */
	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
		if (ifs && !ifs_block_is_dirty(folio, ifs, i))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, pos);
		if (error)
			break;
		trace_iomap_writepage_map(inode, &wpc->iomap);
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc,
				&submit_list);
		count++;
	}
	if (count)
		wpc->ioend->io_folios++;

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));

	/*
	 * We cannot cancel the ioend directly here on error. We may have
	 * already set other pages under writeback and hence we have to run
	 * I/O completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		/*
		 * Let the filesystem know what portion of the current folio
		 * failed to map. If the folio hasn't been added to an ioend,
		 * it won't be affected by I/O completion and we must unlock
		 * it now.
		 */
		if (wpc->ops->discard_folio)
			wpc->ops->discard_folio(folio, pos);
		if (!count) {
			folio_unlock(folio);
			goto done;
		}
	}

	/*
	 * We can have dirty bits set past end of file in the page_mkwrite
	 * path while mapping the last partial folio. Hence it's better to
	 * clear all the dirty bits in the folio here.
	 */
	iomap_clear_range_dirty(folio, 0, folio_size(folio));
	folio_start_writeback(folio);
	folio_unlock(folio);

	/*
	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we
	 * race with a partial folio truncate on a filesystem with a block
	 * size smaller than the folio size.
	 */
	if (!count)
		folio_end_writeback(folio);
done:
	mapping_set_error(inode->i_mapping, error);
	return error;
}
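
/*
 * Example: ->map_blocks must fill wpc->iomap with a mapping that covers
 * @offset.  Because iomap_writepage_map() calls it for every dirty block,
 * implementations usually check whether the currently cached mapping is
 * still valid before doing a real extent lookup.  A hedged sketch for a
 * hypothetical filesystem (all myfs_* names are made up; only the hook
 * signature is real):
 *
 *	static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
 *			struct inode *inode, loff_t offset)
 *	{
 *		// Fast path: the cached mapping still covers this block.
 *		if (offset >= wpc->iomap.offset &&
 *		    offset < wpc->iomap.offset + wpc->iomap.length &&
 *		    myfs_imap_valid(wpc, inode))
 *			return 0;
 *
 *		// Slow path: allocate delalloc blocks if needed and look
 *		// up a fresh mapping for the range starting at offset.
 *		return myfs_lookup_extent(inode, offset, &wpc->iomap);
 *	}
 */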

/*
 * Write out a dirty folio.
 *
 * For delalloc space on the folio, we need to allocate space and flush it.
 * For unwritten space on the folio, we need to start the conversion to
 * regular allocated space.
 */
static int iomap_do_writepage(struct folio *folio,
		struct writeback_control *wbc, void *data)
{
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = folio->mapping->host;
	u64 end_pos, isize;

	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

	/*
	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called from deep call chains in
	 * random callers for direct reclaim or memcg reclaim. We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively
	 * low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this folio beyond the end of the file?
	 *
	 * If the folio index is less than end_index, adjust end_pos to the
	 * highest offset that this folio should represent.
	 * ----------------------------------------------------
	 * |            file mapping               | <EOF>    |
	 * ----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 | Page N |        |
	 * ^--------------------------------^--------|--------|
	 * |     desired writeback range    |      see else   |
	 * ---------------------------------^-----------------|
	 */
	isize = i_size_read(inode);
	end_pos = folio_pos(folio) + folio_size(folio);
	if (end_pos > isize) {
		/*
		 * Check whether the folio to write out is beyond or straddles
		 * i_size.
		 * ------------------------------------------------------
		 * |            file mapping             | <EOF>        |
		 * ------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond   |
		 * ^--------------------------------^--------|----------|
		 * |                                | Straddles         |
		 * ---------------------------------^--------|----------|
		 */
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * Skip the folio if it's fully outside i_size, e.g.
		 * due to a truncate operation that's in progress. We've
		 * cleaned this folio and truncate will finish things off
		 * for us.
		 *
		 * Note that end_index is an unsigned long. If the given
		 * offset is greater than 16TB on a 32-bit system, then if we
		 * checked whether the folio is fully outside i_size with
		 * "if (folio->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0. Hence this folio would be
		 * redirtied and written out repeatedly, which would result in
		 * an infinite loop; the user program performing this
		 * operation would hang. Instead, we can detect this situation
		 * by checking whether the folio is totally beyond i_size or
		 * whether its offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			goto unlock;

		/*
		 * The folio straddles i_size. It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size. For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		end_pos = isize;
	}

	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);

redirty:
	folio_redirty_for_writepage(wbc, folio);
unlock:
	folio_unlock(folio);
	return 0;
}

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
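
/*
 * Example: a filesystem drives all of the above from its ->writepages
 * address_space operation by passing a writepage context and its
 * iomap_writeback_ops.  A hedged sketch; iomap_writepages() and the ops
 * members are the real API, while the myfs_* implementations are
 * hypothetical:
 *
 *	static const struct iomap_writeback_ops myfs_writeback_ops = {
 *		.map_blocks	= myfs_map_blocks,
 *		.prepare_ioend	= myfs_prepare_ioend,
 *		.discard_folio	= myfs_discard_folio,
 *	};
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&myfs_writeback_ops);
 *	}
 */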

static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);