1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * background writeback - scan btree for dirty data and write it to the backing
4 * device
5 *
6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7 * Copyright 2012 Google, Inc.
8 */
9
10 #include "bcache.h"
11 #include "btree.h"
12 #include "debug.h"
13 #include "writeback.h"
14
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
17 #include <linux/sched/clock.h>
18 #include <trace/events/bcache.h>
19
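/*
 * Arm the deferred garbage collection request: once automatic GC has been
 * enabled via gc_after_writeback and dirty bucket usage has reached
 * BCH_AUTO_GC_DIRTY_THRESHOLD, set BCH_DO_AUTO_GC so the writeback thread
 * can trigger GC after writeback finishes.
 */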
20 static void update_gc_after_writeback(struct cache_set *c)
21 {
22 if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
23 c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
24 return;
25
26 c->gc_after_writeback |= BCH_DO_AUTO_GC;
27 }
28
29 /* Rate limiting */
30 static uint64_t __calc_target_rate(struct cached_dev *dc)
31 {
32 struct cache_set *c = dc->disk.c;
33
34 /*
35 * This is the size of the cache, minus the amount used for
36 * flash-only devices
37 */
38 uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
39 atomic_long_read(&c->flash_dev_dirty_sectors);
40
41 /*
42 * Unfortunately there is no control of global dirty data. If the
43 * user states that they want 10% dirty data in the cache, and has,
44 * e.g., 5 backing volumes of equal size, we try and ensure each
45 * backing volume uses about 2% of the cache for dirty data.
46 */
47 uint32_t bdev_share =
48 div64_u64(bdev_nr_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
49 c->cached_dev_sectors);
50
51 uint64_t cache_dirty_target =
52 div_u64(cache_sectors * dc->writeback_percent, 100);
53
54 /* Ensure each backing dev gets at least one dirty share */
55 if (bdev_share < 1)
56 bdev_share = 1;
57
58 return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
59 }
60
61 static void __update_writeback_rate(struct cached_dev *dc)
62 {
63 /*
64 * PI controller:
65 * Figures out the amount that should be written per second.
66 *
67 * First, the error (number of sectors that are dirty beyond our
68 * target) is calculated. The error is accumulated (numerically
69 * integrated).
70 *
71 * Then, the proportional value and integral value are scaled
72 * based on configured values. These are stored as inverses to
73 * avoid fixed point math and to make configuration easy-- e.g.
74 * the default value of 40 for writeback_rate_p_term_inverse
75 * attempts to write at a rate that would retire all the dirty
76 * blocks in 40 seconds.
77 *
78 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
79 * of the error is accumulated in the integral term per second.
80 * This acts as a slow, long-term average that is not subject to
81 * variations in usage like the p term.
82 */
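/*
 * Illustrative example (numbers chosen for clarity, not taken from any
 * particular workload): with the default writeback_rate_p_term_inverse of
 * 40 and 40960 dirty sectors above target, the proportional term alone
 * asks for 40960 / 40 = 1024 sectors/sec, which would retire the excess
 * in roughly 40 seconds.
 */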
83 int64_t target = __calc_target_rate(dc);
84 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
85 int64_t error = dirty - target;
86 int64_t proportional_scaled =
87 div_s64(error, dc->writeback_rate_p_term_inverse);
88 int64_t integral_scaled;
89 uint32_t new_rate;
90
91 /*
92 * We also need to consider the number of dirty buckets when
93 * calculating proportional_scaled; otherwise we might end up with an
94 * unreasonably small writeback rate in a highly fragmented situation,
95 * where very few dirty sectors occupy a lot of dirty buckets. The
96 * worst case is when the dirty buckets reach cutoff_writeback_sync
97 * while the dirty data has still not reached writeback_percent, so
98 * the rate stays at the minimum value, which leaves writes stuck in
99 * non-writeback mode.
100 */
101 struct cache_set *c = dc->disk.c;
102
103 int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets;
104
105 if (dc->writeback_consider_fragment &&
106 c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) {
107 int64_t fragment =
108 div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty);
109 int64_t fp_term;
110 int64_t fps;
111
112 if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
113 fp_term = (int64_t)dc->writeback_rate_fp_term_low *
114 (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
115 } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
116 fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
117 (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
118 } else {
119 fp_term = (int64_t)dc->writeback_rate_fp_term_high *
120 (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
121 }
122 fps = div_s64(dirty, dirty_buckets) * fp_term;
123 if (fragment > 3 && fps > proportional_scaled) {
124 /* Only override the p term when fragment > 3 */
125 proportional_scaled = fps;
126 }
127 }
128
129 if ((error < 0 && dc->writeback_rate_integral > 0) ||
130 (error > 0 && time_before64(local_clock(),
131 dc->writeback_rate.next + NSEC_PER_MSEC))) {
132 /*
133 * Only decrease the integral term if it's more than
134 * zero. Only increase the integral term if the device
135 * is keeping up. (Don't wind up the integral
136 * ineffectively in either case).
137 *
138 * It's necessary to scale this by
139 * writeback_rate_update_seconds to keep the integral
140 * term dimensioned properly.
141 */
142 dc->writeback_rate_integral += error *
143 dc->writeback_rate_update_seconds;
144 }
145
146 integral_scaled = div_s64(dc->writeback_rate_integral,
147 dc->writeback_rate_i_term_inverse);
148
149 new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
150 dc->writeback_rate_minimum, NSEC_PER_SEC);
151
152 dc->writeback_rate_proportional = proportional_scaled;
153 dc->writeback_rate_integral_scaled = integral_scaled;
154 dc->writeback_rate_change = new_rate -
155 atomic_long_read(&dc->writeback_rate.rate);
156 atomic_long_set(&dc->writeback_rate.rate, new_rate);
157 dc->writeback_rate_target = target;
158 }
159
160 static bool set_at_max_writeback_rate(struct cache_set *c,
161 struct cached_dev *dc)
162 {
163 /* Don't set max writeback rate if it is disabled */
164 if (!c->idle_max_writeback_rate_enabled)
165 return false;
166
167 /* Don't set max writeback rate if gc is running */
168 if (!c->gc_mark_valid)
169 return false;
170 /*
171 * idle_counter is increased every time update_writeback_rate() is
172 * called. If all backing devices attached to the same cache set have
173 * identical dc->writeback_rate_update_seconds values, it takes about 6
174 * rounds of update_writeback_rate() on each backing device before
175 * c->at_max_writeback_rate is set to 1, and then the max writeback
176 * rate is set on each dc->writeback_rate.rate.
177 * In order to avoid extra locking cost for counting the exact number
178 * of dirty cached devices, c->attached_dev_nr is used to calculate
179 * the idle threshold. It might be bigger if not all cached devices
180 * are in writeback mode, but it still works well with a limited
181 * number of extra rounds of update_writeback_rate().
182 */
183 if (atomic_inc_return(&c->idle_counter) <
184 atomic_read(&c->attached_dev_nr) * 6)
185 return false;
186
187 if (atomic_read(&c->at_max_writeback_rate) != 1)
188 atomic_set(&c->at_max_writeback_rate, 1);
189
190 atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
191
192 /* keep writeback_rate_target as existing value */
193 dc->writeback_rate_proportional = 0;
194 dc->writeback_rate_integral_scaled = 0;
195 dc->writeback_rate_change = 0;
196
197 /*
198 * Check c->idle_counter and c->at_max_writeback_rate again in case
199 * new I/O arrives before set_at_max_writeback_rate() returns.
200 * Then the writeback rate is set to 1, and its new value should be
201 * decided via __update_writeback_rate().
202 */
203 if ((atomic_read(&c->idle_counter) <
204 atomic_read(&c->attached_dev_nr) * 6) ||
205 !atomic_read(&c->at_max_writeback_rate))
206 return false;
207
208 return true;
209 }
210
211 static void update_writeback_rate(struct work_struct *work)
212 {
213 struct cached_dev *dc = container_of(to_delayed_work(work),
214 struct cached_dev,
215 writeback_rate_update);
216 struct cache_set *c = dc->disk.c;
217
218 /*
219 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
220 * cancel_delayed_work_sync().
221 */
222 set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
223 /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
224 smp_mb__after_atomic();
225
226 /*
227 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
228 * check it here too.
229 */
230 if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
231 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
232 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
233 /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
234 smp_mb__after_atomic();
235 return;
236 }
237
238 /*
239 * If the whole cache set is idle, set_at_max_writeback_rate()
240 * will set the writeback rate to a max number. Then it is
241 * unnecessary to update the writeback rate for an idle cache
242 * set that is already at the maximum writeback rate.
243 */
244 if (atomic_read(&dc->has_dirty) && dc->writeback_percent &&
245 !set_at_max_writeback_rate(c, dc)) {
246 do {
247 if (!down_read_trylock((&dc->writeback_lock))) {
248 dc->rate_update_retry++;
249 if (dc->rate_update_retry <=
250 BCH_WBRATE_UPDATE_MAX_SKIPS)
251 break;
252 down_read(&dc->writeback_lock);
253 dc->rate_update_retry = 0;
254 }
255 __update_writeback_rate(dc);
256 update_gc_after_writeback(c);
257 up_read(&dc->writeback_lock);
258 } while (0);
259 }
260
261
262 /*
263 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
264 * check it here too.
265 */
266 if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
267 !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
268 schedule_delayed_work(&dc->writeback_rate_update,
269 dc->writeback_rate_update_seconds * HZ);
270 }
271
272 /*
273 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
274 * cancel_delayed_work_sync().
275 */
276 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
277 /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
278 smp_mb__after_atomic();
279 }
280
281 static unsigned int writeback_delay(struct cached_dev *dc,
282 unsigned int sectors)
283 {
284 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
285 !dc->writeback_percent)
286 return 0;
287
288 return bch_next_delay(&dc->writeback_rate, sectors);
289 }
290
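/*
 * State for a single dirty key being written back: the closure driving the
 * read-then-write sequence, the owning cached device, the ordering sequence
 * number used to keep writes in index order, and the embedded bio.
 */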
291 struct dirty_io {
292 struct closure cl;
293 struct cached_dev *dc;
294 uint16_t sequence;
295 struct bio bio;
296 };
297
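/*
 * Initialise the embedded bio for key w: size it from KEY_SIZE(), point
 * bi_private back at the keybuf key, and use idle I/O priority when
 * writeback is not being driven by writeback_percent.
 */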
298 static void dirty_init(struct keybuf_key *w)
299 {
300 struct dirty_io *io = w->private;
301 struct bio *bio = &io->bio;
302
303 bio_init(bio, NULL, bio->bi_inline_vecs,
304 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
305 if (!io->dc->writeback_percent)
306 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
307
308 bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
309 bio->bi_private = w;
310 bch_bio_map(bio, NULL);
311 }
312
313 static void dirty_io_destructor(struct closure *cl)
314 {
315 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
316
317 kfree(io);
318 }
319
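/*
 * Final stage of writing back one key: free the bio pages, and if the key
 * is still marked dirty (i.e. no I/O error occurred), insert a clean
 * replacement key into the btree, counting a collision if the insert fails.
 * Finally release the keybuf slot and the in_flight semaphore.
 */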
320 static void write_dirty_finish(struct closure *cl)
321 {
322 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
323 struct keybuf_key *w = io->bio.bi_private;
324 struct cached_dev *dc = io->dc;
325
326 bio_free_pages(&io->bio);
327
328 /* This is kind of a dumb way of signalling errors. */
329 if (KEY_DIRTY(&w->key)) {
330 int ret;
331 unsigned int i;
332 struct keylist keys;
333
334 bch_keylist_init(&keys);
335
336 bkey_copy(keys.top, &w->key);
337 SET_KEY_DIRTY(keys.top, false);
338 bch_keylist_push(&keys);
339
340 for (i = 0; i < KEY_PTRS(&w->key); i++)
341 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
342
343 ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
344
345 if (ret)
346 trace_bcache_writeback_collision(&w->key);
347
348 atomic_long_inc(ret
349 ? &dc->disk.c->writeback_keys_failed
350 : &dc->disk.c->writeback_keys_done);
351 }
352
353 bch_keybuf_del(&dc->writeback_keys, w);
354 up(&dc->in_flight);
355
356 closure_return_with_destructor(cl, dirty_io_destructor);
357 }
358
359 static void dirty_endio(struct bio *bio)
360 {
361 struct keybuf_key *w = bio->bi_private;
362 struct dirty_io *io = w->private;
363
364 if (bio->bi_status) {
365 SET_KEY_DIRTY(&w->key, false);
366 bch_count_backing_io_errors(io->dc, bio);
367 }
368
369 closure_put(&io->cl);
370 }
371
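/*
 * Second stage of writing back one key: wait for our turn in the sequence
 * so writes reach the backing device in index order, then (if the read
 * succeeded and the key is still dirty) submit the write to the backing
 * device before continuing to write_dirty_finish().
 */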
372 static void write_dirty(struct closure *cl)
373 {
374 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
375 struct keybuf_key *w = io->bio.bi_private;
376 struct cached_dev *dc = io->dc;
377
378 uint16_t next_sequence;
379
380 if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
381 /* Not our turn to write; wait for a write to complete */
382 closure_wait(&dc->writeback_ordering_wait, cl);
383
384 if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
385 /*
386 * Edge case: this happened in an indeterminate order
387 * relative to when we were added to the wait list.
388 */
389 closure_wake_up(&dc->writeback_ordering_wait);
390 }
391
392 continue_at(cl, write_dirty, io->dc->writeback_write_wq);
393 return;
394 }
395
396 next_sequence = io->sequence + 1;
397
398 /*
399 * IO errors are signalled using the dirty bit on the key.
400 * If we failed to read, we should not attempt to write to the
401 * backing device. Instead, immediately go to write_dirty_finish
402 * to clean up.
403 */
404 if (KEY_DIRTY(&w->key)) {
405 dirty_init(w);
406 bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
407 io->bio.bi_iter.bi_sector = KEY_START(&w->key);
408 bio_set_dev(&io->bio, io->dc->bdev);
409 io->bio.bi_end_io = dirty_endio;
410
411 /* I/O request sent to backing device */
412 closure_bio_submit(io->dc->disk.c, &io->bio, cl);
413 }
414
415 atomic_set(&dc->writeback_sequence_next, next_sequence);
416 closure_wake_up(&dc->writeback_ordering_wait);
417
418 continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
419 }
420
421 static void read_dirty_endio(struct bio *bio)
422 {
423 struct keybuf_key *w = bio->bi_private;
424 struct dirty_io *io = w->private;
425
426 /* is_read = 1 */
427 bch_count_io_errors(io->dc->disk.c->cache,
428 bio->bi_status, 1,
429 "reading dirty data from cache");
430
431 dirty_endio(bio);
432 }
433
434 static void read_dirty_submit(struct closure *cl)
435 {
436 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
437
438 closure_bio_submit(io->dc->disk.c, &io->bio, cl);
439
440 continue_at(cl, write_dirty, io->dc->writeback_write_wq);
441 }
442
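/*
 * Main writeback loop body: walk the refilled keybuf, batch up to
 * MAX_WRITEBACKS_IN_PASS contiguous keys (or MAX_WRITESIZE_IN_PASS sectors),
 * read the dirty data from the cache device and kick off the ordered
 * writes, throttling between batches via writeback_delay().
 */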
443 static void read_dirty(struct cached_dev *dc)
444 {
445 unsigned int delay = 0;
446 struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
447 size_t size;
448 int nk, i;
449 struct dirty_io *io;
450 struct closure cl;
451 uint16_t sequence = 0;
452
453 BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
454 atomic_set(&dc->writeback_sequence_next, sequence);
455 closure_init_stack(&cl);
456
457 /*
458 * XXX: if we error, background writeback just spins. Should use some
459 * mempools.
460 */
461
462 next = bch_keybuf_next(&dc->writeback_keys);
463
464 while (!kthread_should_stop() &&
465 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
466 next) {
467 size = 0;
468 nk = 0;
469
470 do {
471 BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
472
473 /*
474 * Don't combine too many operations, even if they
475 * are all small.
476 */
477 if (nk >= MAX_WRITEBACKS_IN_PASS)
478 break;
479
480 /*
481 * If the current operation is very large, don't
482 * further combine operations.
483 */
484 if (size >= MAX_WRITESIZE_IN_PASS)
485 break;
486
487 /*
488 * Operations are only eligible to be combined
489 * if they are contiguous.
490 *
491 * TODO: add a heuristic willing to fire a
492 * certain amount of non-contiguous IO per pass,
493 * so that we can benefit from backing device
494 * command queueing.
495 */
496 if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
497 &START_KEY(&next->key)))
498 break;
499
500 size += KEY_SIZE(&next->key);
501 keys[nk++] = next;
502 } while ((next = bch_keybuf_next(&dc->writeback_keys)));
503
504 /* Now we have gathered a set of 1..5 keys to write back. */
505 for (i = 0; i < nk; i++) {
506 w = keys[i];
507
508 io = kzalloc(struct_size(io, bio.bi_inline_vecs,
509 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
510 GFP_KERNEL);
511 if (!io)
512 goto err;
513
514 w->private = io;
515 io->dc = dc;
516 io->sequence = sequence++;
517
518 dirty_init(w);
519 bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
520 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
521 bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
522 io->bio.bi_end_io = read_dirty_endio;
523
524 if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
525 goto err_free;
526
527 trace_bcache_writeback(&w->key);
528
529 down(&dc->in_flight);
530
531 /*
532 * We've acquired a semaphore for the maximum
533 * simultaneous number of writebacks; from here
534 * everything happens asynchronously.
535 */
536 closure_call(&io->cl, read_dirty_submit, NULL, &cl);
537 }
538
539 delay = writeback_delay(dc, size);
540
541 while (!kthread_should_stop() &&
542 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
543 delay) {
544 schedule_timeout_interruptible(delay);
545 delay = writeback_delay(dc, 0);
546 }
547 }
548
549 if (0) {
550 err_free:
551 kfree(w->private);
552 err:
553 bch_keybuf_del(&dc->writeback_keys, w);
554 }
555
556 /*
557 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
558 * freed) before refilling again
559 */
560 closure_sync(&cl);
561 }
562
563 /* Scan for dirty data */
564
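/*
 * Adjust the per-stripe dirty sector counts for a device; nr_sectors may be
 * negative when sectors go clean. Also maintains the full_dirty_stripes
 * bitmap and the cache set's flash_dev_dirty_sectors counter.
 */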
565 void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
566 uint64_t offset, int nr_sectors)
567 {
568 struct bcache_device *d = c->devices[inode];
569 unsigned int stripe_offset, sectors_dirty;
570 int stripe;
571
572 if (!d)
573 return;
574
575 stripe = offset_to_stripe(d, offset);
576 if (stripe < 0)
577 return;
578
579 if (UUID_FLASH_ONLY(&c->uuids[inode]))
580 atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
581
582 stripe_offset = offset & (d->stripe_size - 1);
583
584 while (nr_sectors) {
585 int s = min_t(unsigned int, abs(nr_sectors),
586 d->stripe_size - stripe_offset);
587
588 if (nr_sectors < 0)
589 s = -s;
590
591 if (stripe >= d->nr_stripes)
592 return;
593
594 sectors_dirty = atomic_add_return(s,
595 d->stripe_sectors_dirty + stripe);
596 if (sectors_dirty == d->stripe_size) {
597 if (!test_bit(stripe, d->full_dirty_stripes))
598 set_bit(stripe, d->full_dirty_stripes);
599 } else {
600 if (test_bit(stripe, d->full_dirty_stripes))
601 clear_bit(stripe, d->full_dirty_stripes);
602 }
603
604 nr_sectors -= s;
605 stripe_offset = 0;
606 stripe++;
607 }
608 }
609
610 static bool dirty_pred(struct keybuf *buf, struct bkey *k)
611 {
612 struct cached_dev *dc = container_of(buf,
613 struct cached_dev,
614 writeback_keys);
615
616 BUG_ON(KEY_INODE(k) != dc->disk.id);
617
618 return KEY_DIRTY(k);
619 }
620
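/*
 * For backing devices where partial stripe writes are expensive, refill the
 * writeback keybuf from completely dirty stripes first, scanning the
 * full_dirty_stripes bitmap starting at the last scanned position and
 * wrapping around once.
 */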
621 static void refill_full_stripes(struct cached_dev *dc)
622 {
623 struct keybuf *buf = &dc->writeback_keys;
624 unsigned int start_stripe, next_stripe;
625 int stripe;
626 bool wrapped = false;
627
628 stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
629 if (stripe < 0)
630 stripe = 0;
631
632 start_stripe = stripe;
633
634 while (1) {
635 stripe = find_next_bit(dc->disk.full_dirty_stripes,
636 dc->disk.nr_stripes, stripe);
637
638 if (stripe == dc->disk.nr_stripes)
639 goto next;
640
641 next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
642 dc->disk.nr_stripes, stripe);
643
644 buf->last_scanned = KEY(dc->disk.id,
645 stripe * dc->disk.stripe_size, 0);
646
647 bch_refill_keybuf(dc->disk.c, buf,
648 &KEY(dc->disk.id,
649 next_stripe * dc->disk.stripe_size, 0),
650 dirty_pred);
651
652 if (array_freelist_empty(&buf->freelist))
653 return;
654
655 stripe = next_stripe;
656 next:
657 if (wrapped && stripe > start_stripe)
658 return;
659
660 if (stripe == dc->disk.nr_stripes) {
661 stripe = 0;
662 wrapped = true;
663 }
664 }
665 }
666
667 /*
668 * Returns true if we scanned the entire disk
669 */
670 static bool refill_dirty(struct cached_dev *dc)
671 {
672 struct keybuf *buf = &dc->writeback_keys;
673 struct bkey start = KEY(dc->disk.id, 0, 0);
674 struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
675 struct bkey start_pos;
676
677 /*
678 * Make sure the keybuf position is inside the range for this disk;
679 * at bringup we might not be attached yet, so this disk's inode
680 * number isn't initialized at that point.
681 */
682 if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
683 bkey_cmp(&buf->last_scanned, &end) > 0)
684 buf->last_scanned = start;
685
686 if (dc->partial_stripes_expensive) {
687 refill_full_stripes(dc);
688 if (array_freelist_empty(&buf->freelist))
689 return false;
690 }
691
692 start_pos = buf->last_scanned;
693 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
694
695 if (bkey_cmp(&buf->last_scanned, &end) < 0)
696 return false;
697
698 /*
699 * If we get to the end, start scanning again from the beginning, and
700 * only scan up to where we initially started scanning from:
701 */
702 buf->last_scanned = start;
703 bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
704
705 return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
706 }
707
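/*
 * Per-device writeback thread: repeatedly refill the keybuf with dirty keys
 * and write them back via read_dirty(). When a full index scan finds no
 * more dirty data, mark the backing device clean, optionally kick off auto
 * GC, and finish detaching if that was requested.
 */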
708 static int bch_writeback_thread(void *arg)
709 {
710 struct cached_dev *dc = arg;
711 struct cache_set *c = dc->disk.c;
712 bool searched_full_index;
713
714 bch_ratelimit_reset(&dc->writeback_rate);
715
716 while (!kthread_should_stop() &&
717 !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
718 down_write(&dc->writeback_lock);
719 set_current_state(TASK_INTERRUPTIBLE);
720 /*
721 * If the bcache device is detaching, skip here and continue
722 * to perform writeback. Otherwise, if there is no dirty data
723 * on the cache, or there is dirty data but writeback is
724 * disabled, the writeback thread should sleep here and wait
725 * for others to wake it up.
726 */
727 if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
728 (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
729 up_write(&dc->writeback_lock);
730
731 if (kthread_should_stop() ||
732 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
733 set_current_state(TASK_RUNNING);
734 break;
735 }
736
737 schedule();
738 continue;
739 }
740 set_current_state(TASK_RUNNING);
741
742 searched_full_index = refill_dirty(dc);
743
744 if (searched_full_index &&
745 RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
746 atomic_set(&dc->has_dirty, 0);
747 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
748 bch_write_bdev_super(dc, NULL);
749 /*
750 * If the bcache device is detaching via the sysfs interface, the
751 * writeback thread should stop once there is no dirty data left
752 * on the cache. The BCACHE_DEV_DETACHING flag is set in
753 * bch_cached_dev_detach().
754 */
755 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
756 struct closure cl;
757
758 closure_init_stack(&cl);
759 memset(&dc->sb.set_uuid, 0, 16);
760 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
761
762 bch_write_bdev_super(dc, &cl);
763 closure_sync(&cl);
764
765 up_write(&dc->writeback_lock);
766 break;
767 }
768
769 /*
770 * When the dirty data rate is high (e.g. 50%+), there might
771 * be heavy bucket fragmentation after writeback
772 * finishes, which hurts subsequent write performance.
773 * If users really care about write performance they
774 * may set BCH_ENABLE_AUTO_GC via sysfs; then, when
775 * BCH_DO_AUTO_GC is set, the garbage collection thread
776 * will be woken up here. After the moving gc, the shrunk
777 * btree and the discarded free bucket space on the SSD
778 * may help subsequent write requests.
779 */
780 if (c->gc_after_writeback ==
781 (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
782 c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
783 force_wake_up_gc(c);
784 }
785 }
786
787 up_write(&dc->writeback_lock);
788
789 read_dirty(dc);
790
791 if (searched_full_index) {
792 unsigned int delay = dc->writeback_delay * HZ;
793
794 while (delay &&
795 !kthread_should_stop() &&
796 !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
797 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
798 delay = schedule_timeout_interruptible(delay);
799
800 bch_ratelimit_reset(&dc->writeback_rate);
801 }
802 }
803
804 if (dc->writeback_write_wq) {
805 flush_workqueue(dc->writeback_write_wq);
806 destroy_workqueue(dc->writeback_write_wq);
807 }
808 cached_dev_put(dc);
809 wait_for_kthread_stop();
810
811 return 0;
812 }
813
814 /* Init */
815 #define INIT_KEYS_EACH_TIME 500000
816
817 struct sectors_dirty_init {
818 struct btree_op op;
819 unsigned int inode;
820 size_t count;
821 };
822
823 static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
824 struct bkey *k)
825 {
826 struct sectors_dirty_init *op = container_of(_op,
827 struct sectors_dirty_init, op);
828 if (KEY_INODE(k) > op->inode)
829 return MAP_DONE;
830
831 if (KEY_DIRTY(k))
832 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
833 KEY_START(k), KEY_SIZE(k));
834
835 op->count++;
836 if (!(op->count % INIT_KEYS_EACH_TIME))
837 cond_resched();
838
839 return MAP_CONTINUE;
840 }
841
842 static int bch_root_node_dirty_init(struct cache_set *c,
843 struct bcache_device *d,
844 struct bkey *k)
845 {
846 struct sectors_dirty_init op;
847 int ret;
848
849 bch_btree_op_init(&op.op, -1);
850 op.inode = d->id;
851 op.count = 0;
852
853 ret = bcache_btree(map_keys_recurse,
854 k,
855 c->root,
856 &op.op,
857 &KEY(op.inode, 0, 0),
858 sectors_dirty_init_fn,
859 0);
860 if (ret < 0)
861 pr_warn("sectors dirty init failed, ret=%d!\n", ret);
862
863 return ret;
864 }
865
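/*
 * Worker thread for multi-threaded dirty-sector initialisation: threads
 * claim root-node keys by index under idx_lock, and each walks the subtree
 * below its claimed key, so the btree is covered without overlap.
 */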
866 static int bch_dirty_init_thread(void *arg)
867 {
868 struct dirty_init_thrd_info *info = arg;
869 struct bch_dirty_init_state *state = info->state;
870 struct cache_set *c = state->c;
871 struct btree_iter iter;
872 struct bkey *k, *p;
873 int cur_idx, prev_idx, skip_nr;
874
875 k = p = NULL;
876 cur_idx = prev_idx = 0;
877
878 bch_btree_iter_init(&c->root->keys, &iter, NULL);
879 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
880 BUG_ON(!k);
881
882 p = k;
883
884 while (k) {
885 spin_lock(&state->idx_lock);
886 cur_idx = state->key_idx;
887 state->key_idx++;
888 spin_unlock(&state->idx_lock);
889
890 skip_nr = cur_idx - prev_idx;
891
892 while (skip_nr) {
893 k = bch_btree_iter_next_filter(&iter,
894 &c->root->keys,
895 bch_ptr_bad);
896 if (k)
897 p = k;
898 else {
899 atomic_set(&state->enough, 1);
900 /* Update state->enough earlier */
901 smp_mb__after_atomic();
902 goto out;
903 }
904 skip_nr--;
905 }
906
907 if (p) {
908 if (bch_root_node_dirty_init(c, state->d, p) < 0)
909 goto out;
910 }
911
912 p = NULL;
913 prev_idx = cur_idx;
914 }
915
916 out:
917 /* In order to wake up state->wait in time */
918 smp_mb__before_atomic();
919 if (atomic_dec_and_test(&state->started))
920 wake_up(&state->wait);
921
922 return 0;
923 }
924
925 static int bch_btre_dirty_init_thread_nr(void)
926 {
927 int n = num_online_cpus()/2;
928
929 if (n == 0)
930 n = 1;
931 else if (n > BCH_DIRTY_INIT_THRD_MAX)
932 n = BCH_DIRTY_INIT_THRD_MAX;
933
934 return n;
935 }
936
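/*
 * Initialise the dirty sector counts for a bcache device from the btree.
 * If the root is a leaf, just scan its keys directly; otherwise spread the
 * work across up to BCH_DIRTY_INIT_THRD_MAX threads (half the online CPUs)
 * and wait for them all to finish.
 */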
937 void bch_sectors_dirty_init(struct bcache_device *d)
938 {
939 int i;
940 struct bkey *k = NULL;
941 struct btree_iter iter;
942 struct sectors_dirty_init op;
943 struct cache_set *c = d->c;
944 struct bch_dirty_init_state state;
945
946 /* Just count root keys if no leaf node */
947 rw_lock(0, c->root, c->root->level);
948 if (c->root->level == 0) {
949 bch_btree_op_init(&op.op, -1);
950 op.inode = d->id;
951 op.count = 0;
952
953 for_each_key_filter(&c->root->keys,
954 k, &iter, bch_ptr_invalid)
955 sectors_dirty_init_fn(&op.op, c->root, k);
956
957 rw_unlock(0, c->root);
958 return;
959 }
960
961 memset(&state, 0, sizeof(struct bch_dirty_init_state));
962 state.c = c;
963 state.d = d;
964 state.total_threads = bch_btre_dirty_init_thread_nr();
965 state.key_idx = 0;
966 spin_lock_init(&state.idx_lock);
967 atomic_set(&state.started, 0);
968 atomic_set(&state.enough, 0);
969 init_waitqueue_head(&state.wait);
970
971 for (i = 0; i < state.total_threads; i++) {
972 /* Fetch latest state.enough earlier */
973 smp_mb__before_atomic();
974 if (atomic_read(&state.enough))
975 break;
976
977 state.infos[i].state = &state;
978 state.infos[i].thread =
979 kthread_run(bch_dirty_init_thread, &state.infos[i],
980 "bch_dirtcnt[%d]", i);
981 if (IS_ERR(state.infos[i].thread)) {
982 pr_err("fails to run thread bch_dirty_init[%d]\n", i);
983 for (--i; i >= 0; i--)
984 kthread_stop(state.infos[i].thread);
985 goto out;
986 }
987 atomic_inc(&state.started);
988 }
989
990 out:
991 /* Must wait for all threads to stop. */
992 wait_event(state.wait, atomic_read(&state.started) == 0);
993 rw_unlock(0, c->root);
994 }
995
996 void bch_cached_dev_writeback_init(struct cached_dev *dc)
997 {
998 sema_init(&dc->in_flight, 64);
999 init_rwsem(&dc->writeback_lock);
1000 bch_keybuf_init(&dc->writeback_keys);
1001
1002 dc->writeback_metadata = true;
1003 dc->writeback_running = false;
1004 dc->writeback_consider_fragment = true;
1005 dc->writeback_percent = 10;
1006 dc->writeback_delay = 30;
1007 atomic_long_set(&dc->writeback_rate.rate, 1024);
1008 dc->writeback_rate_minimum = 8;
1009
1010 dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
1011 dc->writeback_rate_p_term_inverse = 40;
1012 dc->writeback_rate_fp_term_low = 1;
1013 dc->writeback_rate_fp_term_mid = 10;
1014 dc->writeback_rate_fp_term_high = 1000;
1015 dc->writeback_rate_i_term_inverse = 10000;
1016
1017 /* For dc->writeback_lock contention in update_writeback_rate() */
1018 dc->rate_update_retry = 0;
1019
1020 WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
1021 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
1022 }
1023
1024 int bch_cached_dev_writeback_start(struct cached_dev *dc)
1025 {
1026 dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
1027 WQ_MEM_RECLAIM, 0);
1028 if (!dc->writeback_write_wq)
1029 return -ENOMEM;
1030
1031 cached_dev_get(dc);
1032 dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
1033 "bcache_writeback");
1034 if (IS_ERR(dc->writeback_thread)) {
1035 cached_dev_put(dc);
1036 destroy_workqueue(dc->writeback_write_wq);
1037 return PTR_ERR(dc->writeback_thread);
1038 }
1039 dc->writeback_running = true;
1040
1041 WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
1042 schedule_delayed_work(&dc->writeback_rate_update,
1043 dc->writeback_rate_update_seconds * HZ);
1044
1045 bch_writeback_queue(dc);
1046
1047 return 0;
1048 }
1049