/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>

struct bio;

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Beyond that, dirtying tasks enter a wait loop (possibly for a long time)
 * until the number of dirty pages drops, unless they have already written
 * enough pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
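
/*
 * Illustrative arithmetic (editor's example, not from the original source):
 * with DIRTY_SCOPE = 8 and DIRTY_FULL_SCOPE = 4, a global dirty threshold of,
 * say, 1000 pages gives a smooth-throttling region of
 * (1000 - 1000/4, 1000) = (750, 1000) dirty pages.
 */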

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * A control structure which tells the writeback code what to do. These are
 * always on the stack, and hence need no locking. They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byte range. The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
	unsigned unpinned_fscache_wb:1;	/* Cleared I_PINNING_FSCACHE_WB */

	/*
	 * When writeback IOs are bounced through async layers, only the
	 * initial synchronous phase should be accounted towards inode
	 * cgroup ownership arbitration to avoid confusion. Later stages
	 * can set the following flag to disable the accounting.
	 */
	unsigned no_cgroup_owner:1;

	unsigned punt_to_cgroup:1;	/* cgrp punting, see __REQ_CGROUP_PUNT */

	/*
	 * To enable batching of swap writes to non-block-device backends,
	 * "plug" can be set to point to a 'struct swap_iocb *'. When all swap
	 * writes have been submitted and the swap_iocb is not NULL,
	 * swap_write_unplug() should be called.
	 */
	struct swap_iocb **swap_plug;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;	/* wb this writeback is issued under */
	struct inode *inode;		/* inode being written out */

	/* foreign inode detection, see wbc_detach_inode() */
	int wb_id;			/* current wb id */
	int wb_lcand_id;		/* last foreign candidate wb id */
	int wb_tcand_id;		/* this foreign candidate wb id */
	size_t wb_bytes;		/* bytes written by current wb */
	size_t wb_lcand_bytes;		/* bytes written by last candidate */
	size_t wb_tcand_bytes;		/* bytes written by this candidate */
#endif
};
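
/*
 * Minimal usage sketch (editor's illustration, not part of the original
 * header): a caller wanting an integrity writeback of a whole mapping might
 * fill a stack-allocated writeback_control and hand it to do_writepages(),
 * declared further below. Unspecified fields stay zero, matching the
 * initialisation rule described above.
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 *	ret = do_writepages(mapping, &wbc);
 */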

static inline int wbc_to_write_flags(struct writeback_control *wbc)
{
	int flags = 0;

	if (wbc->punt_to_cgroup)
		flags = REQ_CGROUP_PUNT;

	if (wbc->sync_mode == WB_SYNC_ALL)
		flags |= REQ_SYNC;
	else if (wbc->for_kupdate || wbc->for_background)
		flags |= REQ_BACKGROUND;

	return flags;
}
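
/*
 * Illustrative use (editor's sketch of an assumed caller, not from this
 * header): when submitting a bio on behalf of a writeback request, the
 * returned flags are typically OR'ed into the operation, e.g.:
 *
 *	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
 */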

#ifdef CONFIG_CGROUP_WRITEBACK
#define wbc_blkcg_css(wbc) \
	((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
#else
#define wbc_blkcg_css(wbc)	(blkcg_root_css)
#endif /* CONFIG_CGROUP_WRITEBACK */

/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in. There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()]. Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages. Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time. The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (e.g. on the startup of KVM in a
	 * swapless system). This may throw the system into a deep
	 * dirty-exceeded state and throttle heavy and light dirtiers alike.
	 * To retain good responsiveness, maintain global_dirty_limit, which
	 * tracks slowly down towards the knocked-down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};

/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed. It resets @dom's dirty limit parameters to prevent
 * the past values which don't match the current configuration from skewing
 * dirty throttling. Without this, when the memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied, leading to consecutive unnecessary OOMs, and may get stuck in
 * that situation.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}
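
/*
 * Hypothetical caller sketch (shrink_domain_memory() is a made-up helper;
 * editor's example): after resizing the memory backing a domain, reset its
 * limits so stale values do not skew throttling:
 *
 *	shrink_domain_memory(dom);
 *	wb_domain_size_changed(dom);
 */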

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
							enum wb_reason reason);
void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
				enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
void inode_io_list_del(struct inode *inode);

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/cgroup.h>
#include <linux/bio.h>

void __inode_attach_wb(struct inode *inode, struct page *page);
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
	__releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
			      size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
			   enum wb_reason reason, struct wb_completion *done);
void cgroup_writeback_umount(void);
bool cleanup_offline_cgwb(struct bdi_writeback *wb);
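
/*
 * Illustrative attach/detach sequence (editor's sketch of how the inode
 * writeback path is expected to use the functions above, not a literal
 * excerpt):
 *
 *	spin_lock(&inode->i_lock);
 *	wbc_attach_and_unlock_inode(&wbc, inode);
 *	... write back pages, calling wbc_account_cgroup_owner() for each
 *	    page submitted ...
 *	wbc_detach_inode(&wbc);
 */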

/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current. May be called w/ or w/o
 * @inode->i_lock.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed. Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

/**
 * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * This function is to be used by __filemap_fdatawrite_range(), which is an
 * alternative entry point into writeback code, and first ensures @inode is
 * associated with a bdi_writeback and attaches it to @wbc.
 */
static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_attach_wb(inode, NULL);
	wbc_attach_and_unlock_inode(wbc, inode);
}

/**
 * wbc_init_bio - writeback specific initialization of bio
 * @wbc: writeback_control for the writeback in progress
 * @bio: bio to be initialized
 *
 * @bio is a part of the writeback in progress controlled by @wbc. Perform
 * writeback specific initialization. This is used to apply the cgroup
 * writeback context. Must be called after the bio has been associated with
 * a device.
 */
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
	/*
	 * The pageout() path doesn't attach @wbc to the inode being written
	 * out. This is intentional as we don't want the function to block
	 * behind a slow cgroup. Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (wbc->wb)
		bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}
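
/*
 * Illustrative submission sketch (fs_alloc_write_bio() is a made-up helper;
 * editor's example, not from this header): a filesystem building a writeback
 * bio would associate it with the cgroup context and account the bytes,
 * roughly:
 *
 *	bio = fs_alloc_write_bio(inode, page);
 *	wbc_init_bio(wbc, bio);
 *	wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
 *	submit_bio(bio);
 */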

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
					       struct inode *inode)
	__releases(&inode->i_lock)
{
	spin_unlock(&inode->i_lock);
}

static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
}

static inline void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
					    struct page *page, size_t bytes)
{
}

static inline void cgroup_writeback_umount(void)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * mm/page-writeback.c
 */
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_timer_fn(struct timer_list *t);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;

/* These are exported to sysctl. */
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int laptop_mode;

int dirtytime_interval_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);

void wb_update_bandwidth(struct bdi_writeback *wb);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);
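
/*
 * Illustrative use (editor's example): a buffered write path that has just
 * dirtied pages in a mapping would typically call
 *
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * once per batch of dirtied pages so that heavy dirtiers are throttled.
 */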

typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
				void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);
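
/*
 * Illustrative ->writepages() sketch (fs_writepage_cb(), fs_writepages() and
 * struct fs_write_ctx are made-up names; editor's example): a filesystem can
 * implement its writepages method on top of write_cache_pages() with a
 * writepage_t callback, which is invoked with each page locked:
 *
 *	static int fs_writepage_cb(struct page *page,
 *				   struct writeback_control *wbc, void *data)
 *	{
 *		struct fs_write_ctx *ctx = data;
 *
 *		... write out or redirty the locked @page, update ctx ...
 *		return 0;
 *	}
 *
 *	static int fs_writepages(struct address_space *mapping,
 *				 struct writeback_control *wbc)
 *	{
 *		struct fs_write_ctx ctx = { };
 *
 *		return write_cache_pages(mapping, wbc, fs_writepage_cb, &ctx);
 *	}
 */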

bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
void folio_account_redirty(struct folio *folio);
static inline void account_page_redirty(struct page *page)
{
	folio_account_redirty(page_folio(page));
}
bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
bool redirty_page_for_writepage(struct writeback_control *, struct page *);

void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);

#endif /* WRITEBACK_H */