/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_writeback_running,	/* Writeback is in progress */
	BDI_unused,		/* Available bits start here */
};
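
/*
 * backing_dev_info.state must only be manipulated with atomic bitops
 * (see the comment on the field below).  A minimal illustrative sketch,
 * not a quote of real callers:
 *
 *	if (!test_and_set_bit(BDI_writeback_running, &bdi->state)) {
 *		... write back some inodes ...
 *		clear_bit(BDI_writeback_running, &bdi->state);
 *	}
 */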

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;	/* last old data flush */
	unsigned long last_active;	/* last time bdi thread was active */

	struct task_struct *task;	/* writeback thread */
	struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
};

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects work_list */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct list_head bdi_pending_list;
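
/*
 * Typical embedding lifecycle, sketched for illustration only (error
 * handling abbreviated; the "foo" names are hypothetical):
 *
 *	static struct backing_dev_info foo_bdi = {
 *		.name		= "foo",
 *		.capabilities	= BDI_CAP_MAP_COPY,
 *	};
 *
 *	err = bdi_init(&foo_bdi);
 *	if (!err)
 *		err = bdi_register(&foo_bdi, NULL, "foo");
 *	...
 *	bdi_unregister(&foo_bdi);
 *	bdi_destroy(&foo_bdi);
 *
 * bdi_setup_and_register() bundles the init and register steps for
 * callers that don't need a parent device or a custom name format.
 */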

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}
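
/*
 * The __-prefixed helpers assume interrupts are already disabled; the
 * plain variants disable them around the update.  An illustrative sketch
 * of the usual pairing when a page enters and leaves the dirty state
 * (the real callers live in the page-writeback code):
 *
 *	inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 *	...
 *	dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 */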

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}
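
/*
 * bdi_stat() is cheap but approximate: each CPU may hold up to
 * BDI_STAT_BATCH uncommitted updates, so the result can be off by up to
 * bdi_stat_error().  bdi_stat_sum() folds in every CPU's delta for an
 * exact but much more expensive answer.  A sketch of the usual pattern,
 * with "thresh" as a hypothetical dirty threshold:
 *
 *	if (bdi_stat(bdi, BDI_RECLAIMABLE) < thresh - bdi_stat_error(bdi))
 *		return;					(fast path)
 *	if (bdi_stat_sum(bdi, BDI_RECLAIMABLE) < thresh)
 *		return;			(exact check near the limit)
 */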

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
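
/*
 * Illustrative capability settings (a sketch, not quoted from real
 * filesystems): a ramfs-style filesystem whose pages should neither be
 * written back nor accounted as dirty could use
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY;
 *
 * while a shmem/tmpfs-style filesystem whose pages are reclaimed to swap
 * would additionally set BDI_CAP_SWAP_BACKED.
 */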

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}
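
/*
 * Opportunistic writers typically back off instead of queueing more I/O
 * against a congested device.  A hedged sketch of the common pattern:
 *
 *	if (bdi_write_congested(mapping->backing_dev_info))
 *		return 0;		(skip writeback for now)
 */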

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
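
/*
 * Illustrative throttling loop: a dirtier that finds the async queue
 * congested can sleep until the congestion bit clears or the timeout
 * expires (congestion_wait() returns the remaining jiffies):
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 */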

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}
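
/*
 * bdi_sched_wait() is an action callback for wait_on_bit(): it simply
 * schedules and reports success.  An illustrative sketch of sleeping on
 * a bdi state bit (assumes the wait_on_bit() action-callback API):
 *
 *	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
 *		    TASK_UNINTERRUPTIBLE);
 */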

#endif		/* _LINUX_BACKING_DEV_H */