// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
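
/*
 * A rough sketch of the recovery sequence as driven from run_cache_set()
 * (illustrative only; see super.c for the authoritative ordering and error
 * handling, and note the btree check that runs between these steps):
 *
 *	LIST_HEAD(journal);
 *
 *	bch_journal_read(c, &journal);   // collect jsets from the journal buckets
 *	bch_journal_mark(c, &journal);   // mark keys/buckets they reference
 *	bch_journal_replay(c, &journal); // re-insert the keys, oldest first
 */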

static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u\n", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio, ca->bdev, REQ_OP_READ);
		bio->bi_iter.bi_sector = bucket + offset;
		bio->bi_iter.bi_size = len << 9;

		bio->bi_end_io = journal_read_endio;
		bio->bi_private = &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic\n", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca));

			/*
			 * Nodes in 'list' are in linear increasing order of
			 * i->j.seq, the node on head has the smallest (oldest)
			 * journal seq, the node on tail has the biggest
			 * (latest) journal seq.
			 */

			/*
			 * Check from the oldest jset for last_seq. If
			 * i->j.seq < j->last_seq, it means the oldest jset
			 * in list is expired and useless, remove it from
			 * this list. Otherwise, j is a candidate jset for
			 * further following checks.
			 */
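			/*
			 * Worked example (hypothetical seq numbers): if the
			 * list currently holds jsets with seq 10, 11 and 12
			 * (head to tail) and the jset j being read has
			 * last_seq == 12, the entries with seq 10 and 11 are
			 * obsolete and freed here; only the seq 12 entry
			 * survives for the checks below.
			 */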
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			/* iterate list in reverse order (from latest jset) */
			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				/*
				 * if j->seq is less than any i->j.last_seq
				 * in list, j is an expired and useless jset.
				 */
				if (j->seq < i->j.last_seq)
					goto next_set;

				/*
				 * 'where' points to the first jset in list
				 * which is older than j.
				 */
				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			/* Add to the location after 'where' points to */
			list_add(&i->list, where);
			ret = 1;

			if (j->seq > ja->seq[bucket_index])
				ja->seq[bucket_index] = j->seq;
next_set:
			offset += blocks * ca->sb.block_size;
			len -= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
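/*
 * read_bucket(b) wraps journal_read_bucket(): it records bucket b as visited
 * in 'bitmap', returns from bch_journal_read() directly on error, and
 * otherwise evaluates to 1 if the bucket contained journal entries and 0 if
 * it did not.
 */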
#define read_bucket(b)						\
	({							\
		ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);				\
		if (ret < 0)					\
			return ret;				\
		ret;						\
	})

	struct cache *ca = c->cache;
	int ret = 0;
	struct journal_device *ja = &ca->journal;
	DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
	unsigned int i, l, r, m;
	uint64_t seq;

	bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
	pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);

	/*
	 * Read journal buckets ordered by golden ratio hash to quickly
	 * find a sequence of buckets with valid journal entries
	 */
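	/*
	 * (2654435769 is the 32-bit golden ratio constant, roughly
	 * 2^32 / 1.618 as used in Knuth's multiplicative hashing, so
	 * successive values of i probe the journal buckets in a spread-out,
	 * pseudo-random order rather than linearly.)
	 */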
	for (i = 0; i < ca->sb.njournal_buckets; i++) {
		/*
		 * We must try index l == 0 first for correctness: the
		 * journal buckets form a circular buffer which might
		 * have wrapped around.
		 */
		l = (i * 2654435769U) % ca->sb.njournal_buckets;

		if (test_bit(l, bitmap))
			break;

		if (read_bucket(l))
			goto bsearch;
	}

	/*
	 * If that fails, check all the buckets we haven't checked
	 * already
	 */
	pr_debug("falling back to linear search\n");

	for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
		if (read_bucket(l))
			goto bsearch;

	/* no journal entries on this device? */
	if (l == ca->sb.njournal_buckets)
		goto out;
bsearch:
	BUG_ON(list_empty(list));

	/* Binary search */
	m = l;
	r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
	pr_debug("starting binary search, l %u r %u\n", l, r);

	while (l + 1 < r) {
		seq = list_entry(list->prev, struct journal_replay,
				 list)->j.seq;

		m = (l + r) >> 1;
		read_bucket(m);

		if (seq != list_entry(list->prev, struct journal_replay,
				      list)->j.seq)
			l = m;
		else
			r = m;
	}

	/*
	 * Read buckets in reverse order until we stop finding more
	 * journal entries
	 */
	pr_debug("finishing up: m %u njournal_buckets %u\n",
		 m, ca->sb.njournal_buckets);
	l = m;

	while (1) {
		if (!l--)
			l = ca->sb.njournal_buckets - 1;

		if (l == m)
			break;

		if (test_bit(l, bitmap))
			continue;

		if (!read_bucket(l))
			break;
	}

	seq = 0;

	for (i = 0; i < ca->sb.njournal_buckets; i++)
		if (ja->seq[i] > seq) {
			seq = ja->seq[i];
			/*
			 * When journal_reclaim() goes to allocate for
			 * the first time, it'll use the bucket after
			 * ja->cur_idx
			 */
			ja->cur_idx = i;
			ja->last_idx = ja->discard_idx = (i + 1) %
				ca->sb.njournal_buckets;
		}

out:
	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca = s->cache;

	if (ca->discard)
		return true;

	return false;
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq) {
			if (n == start && is_discard_enabled(s))
				pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
					n, i->j.seq - 1, start, end);
			else {
				pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
				       n, i->j.seq - 1, start, end);
				ret = -EIO;
				goto err;
			}
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

void bch_journal_space_reserve(struct journal *j)
{
	j->do_reserve = true;
}

/* Journalling */

static void btree_flush_write(struct cache_set *c)
{
	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
	unsigned int i, nr;
	int ref_nr;
	atomic_t *fifo_front_p, *now_fifo_front_p;
	size_t mask;

	if (c->journal.btree_flushing)
		return;

	spin_lock(&c->journal.flush_write_lock);
	if (c->journal.btree_flushing) {
		spin_unlock(&c->journal.flush_write_lock);
		return;
	}
	c->journal.btree_flushing = true;
	spin_unlock(&c->journal.flush_write_lock);

	/* get the oldest journal entry and check its refcount */
	spin_lock(&c->journal.lock);
	fifo_front_p = &fifo_front(&c->journal.pin);
	ref_nr = atomic_read(fifo_front_p);
	if (ref_nr <= 0) {
		/*
		 * do nothing if no btree node references
		 * the oldest journal entry
		 */
		spin_unlock(&c->journal.lock);
		goto out;
	}
	spin_unlock(&c->journal.lock);

	mask = c->journal.pin.mask;
	nr = 0;
	atomic_long_inc(&c->flush_write);
	memset(btree_nodes, 0, sizeof(btree_nodes));

	mutex_lock(&c->bucket_lock);
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		/*
		 * It is safe to get now_fifo_front_p without holding
		 * c->journal.lock here, because we don't need the exact
		 * value, just to check whether the front pointer of
		 * c->journal.pin has changed.
		 */
		now_fifo_front_p = &fifo_front(&c->journal.pin);
		/*
		 * If the oldest journal entry is reclaimed and the front
		 * pointer of c->journal.pin changes, it is unnecessary
		 * to scan c->btree_cache anymore, just quit the loop and
		 * flush out what we have already.
		 */
		if (now_fifo_front_p != fifo_front_p)
			break;
		/*
		 * quit this loop if all matching btree nodes are
		 * scanned and recorded in btree_nodes[] already.
		 */
		ref_nr = atomic_read(fifo_front_p);
		if (nr >= ref_nr)
			break;

		if (btree_node_journal_flush(b))
			pr_err("BUG: flush_write bit should not be set here!\n");

		mutex_lock(&b->write_lock);

		if (!btree_node_dirty(b)) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		/*
		 * Only select the btree node which exactly references
		 * the oldest journal entry.
		 *
		 * If the journal entry pointed to by fifo_front_p is
		 * reclaimed in parallel, don't worry:
		 * - the list_for_each_xxx loop will quit when checking
		 *   next now_fifo_front_p.
		 * - If there are matched nodes recorded in btree_nodes[],
		 *   they are clean now (this is why and how the oldest
		 *   journal entry can be reclaimed). These selected nodes
		 *   will be ignored and skipped in the following for-loop.
		 */
		if (((btree_current_write(b)->journal - fifo_front_p) &
		     mask) != 0) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		set_btree_node_journal_flush(b);

		mutex_unlock(&b->write_lock);

		btree_nodes[nr++] = b;
		/*
		 * To avoid holding c->bucket_lock for too long, only
		 * scan for at most BTREE_FLUSH_NR matching btree nodes.
		 * If more btree nodes reference the oldest journal
		 * entry, try to flush them the next time
		 * btree_flush_write() is called.
		 */
		if (nr == BTREE_FLUSH_NR)
			break;
	}
	mutex_unlock(&c->bucket_lock);

	for (i = 0; i < nr; i++) {
		b = btree_nodes[i];
		if (!b) {
			pr_err("BUG: btree_nodes[%d] is NULL\n", i);
			continue;
		}

		/* safe to check without holding b->write_lock */
		if (!btree_node_journal_flush(b)) {
			pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
			continue;
		}

		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: written by others\n", b);
			continue;
		}

		if (!btree_node_dirty(b)) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: dirty bit cleaned by others\n", b);
			continue;
		}

		__bch_btree_node_write(b, NULL);
		clear_bit(BTREE_NODE_journal_flush, &b->flags);
		mutex_unlock(&b->write_lock);
	}

out:
	spin_lock(&c->journal.flush_write_lock);
	c->journal.btree_flushing = false;
	spin_unlock(&c->journal.flush_write_lock);
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
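
/*
 * Worked example (hypothetical numbers): if j->seq == 100 and three journal
 * entries are still pinned (fifo_used(&j->pin) == 3), those pins correspond
 * to seq 98, 99 and 100, so last_seq(j) == 100 - 3 + 1 == 98.
 */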

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}

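/*
 * ja->discard_in_flight cycles through three states: DISCARD_READY (a
 * discard may be issued for the bucket at ja->discard_idx), then
 * DISCARD_IN_FLIGHT while the discard bio is outstanding, then DISCARD_DONE
 * once journal_discard_endio() completes, at which point do_journal_discard()
 * advances ja->discard_idx and goes back to DISCARD_READY.
 */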
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		fallthrough;

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
		bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_iter.bi_size = bucket_bytes(ca);
		bio->bi_end_io = journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		queue_work(bch_journal_wq, &ja->discard_work);
	}
}

static unsigned int free_journal_buckets(struct cache_set *c)
{
	struct journal *j = &c->journal;
	struct cache *ca = c->cache;
	struct journal_device *ja = &c->cache->journal;
	unsigned int n;

	/* In case njournal_buckets is not a power of 2 */
	if (ja->cur_idx >= ja->discard_idx)
		n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
	else
		n = ja->discard_idx - ja->cur_idx;

	if (n > (1 + j->do_reserve))
		return n - (1 + j->do_reserve);

	return 0;
}
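
/*
 * Worked example (hypothetical values): with njournal_buckets == 8,
 * cur_idx == 5 and discard_idx == 3, n == 8 + 3 - 5 == 6 buckets lie between
 * the two indices; with do_reserve set, two of them are held back, so
 * free_journal_buckets() returns 4.
 */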

static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca = c->cache;
	uint64_t last_seq;
	struct journal_device *ja = &ca->journal;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	while (ja->last_idx != ja->cur_idx &&
	       ja->seq[ja->last_idx] < last_seq)
		ja->last_idx = (ja->last_idx + 1) %
			ca->sb.njournal_buckets;

	do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	if (!free_journal_buckets(c))
		goto out;

	ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
	k->ptr[0] = MAKE_PTR(0,
			     bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
			     ca->sb.nr_this_dev);
	atomic_long_inc(&c->reclaimed_journal_buckets);

	bkey_init(k);
	SET_KEY_PTRS(k, 1);
	c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;

out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq = ++j->seq;
	j->cur->dirty = false;
	j->cur->need_write = false;
	j->cur->data->keys = 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca = c->cache;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
		ca->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
	w->data->magic = jset_magic(&ca->sb);
	w->data->version = BCACHE_JSET_VERSION;
	w->data->last_seq = last_seq(&c->journal);
	w->data->csum = csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = c->cache;
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio, ca->bdev, REQ_OP_WRITE |
			  REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
		bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io = journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio, w->data->keys);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	/* If KEY_PTRS(k) == 0, this jset gets lost in air */
	BUG_ON(i == 0);

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;
	struct cache *ca = c->cache;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(ca)) * ca->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * ca->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
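
/*
 * A minimal usage sketch (modelled on bch_journal_meta() below; the closure
 * and error handling of real callers are omitted):
 *
 *	struct keylist keys;
 *	atomic_t *ref;
 *
 *	bch_keylist_init(&keys);
 *	// ... fill in the keys being inserted ...
 *	ref = bch_journal(c, &keys, parent_closure);
 *	if (ref)
 *		atomic_dec_bug(ref);	// release the journal pin when done
 *
 * bch_journal() returns NULL when journalling is unavailable (sync mode off
 * or CACHE_SET_IO_DISABLE set).
 */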

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	/* No journaling if CACHE_SET_IO_DISABLE set already */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return NULL;

	if (!CACHE_SYNC(&c->cache->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		queue_delayed_work(bch_flush_wq, &c->journal.work,
				   msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	spin_lock_init(&j->flush_write_lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
		return -ENOMEM;

	return 0;
}