Lines matching refs: ca
358 struct cache *ca = bio->bi_private; in write_super_endio() local
361 bch_count_io_errors(ca, bio->bi_status, 0, in write_super_endio()
363 closure_put(&ca->set->sb_write); in write_super_endio()
376 struct cache *ca = c->cache; in bcache_write_super() local
377 struct bio *bio = &ca->sb_bio; in bcache_write_super()
383 ca->sb.seq++; in bcache_write_super()
385 if (ca->sb.version < version) in bcache_write_super()
386 ca->sb.version = version; in bcache_write_super()
388 bio_init(bio, ca->bdev, ca->sb_bv, 1, 0); in bcache_write_super()
390 bio->bi_private = ca; in bcache_write_super()
393 __write_super(&ca->sb, ca->sb_disk, bio); in bcache_write_super()
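The write_super_endio() and bcache_write_super() fragments above form the superblock write path: the submit side bumps the in-memory sequence number and version, re-initialises the per-cache sb_bio and hands off to __write_super(); the completion side counts the I/O error against the cache and drops the sb_write closure. The condensed sketch below only stitches the listed lines together; the closure/serialisation setup, the bi_end_io assignment, the 'version' variable and the error-message string are assumptions, not verbatim source.

    static void write_super_endio(struct bio *bio)
    {
            struct cache *ca = bio->bi_private;

            /* third argument 0: this was a write, not a read */
            bch_count_io_errors(ca, bio->bi_status, 0,
                                "writing superblock" /* message assumed */);
            closure_put(&ca->set->sb_write);
    }

    void bcache_write_super(struct cache_set *c)
    {
            struct cache *ca = c->cache;
            struct bio *bio = &ca->sb_bio;

            /* omitted: taking c->sb_write and serialising concurrent writers (assumed) */

            ca->sb.seq++;
            if (ca->sb.version < version)           /* 'version' set up earlier, not shown */
                    ca->sb.version = version;

            bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
            bio->bi_end_io  = write_super_endio;    /* assumed */
            bio->bi_private = ca;

            __write_super(&ca->sb, ca->sb_disk, bio);
    }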
503 struct cache *ca = c->cache; in __uuid_write() local
512 size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS; in __uuid_write()
518 atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written); in __uuid_write()
583 struct cache *ca = bio->bi_private; in prio_endio() local
585 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); in prio_endio()
586 bch_bbio_free(bio, ca->set); in prio_endio()
587 closure_put(&ca->prio); in prio_endio()
590 static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf) in prio_io() argument
592 struct closure *cl = &ca->prio; in prio_io()
593 struct bio *bio = bch_bbio_alloc(ca->set); in prio_io()
597 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io()
598 bio_set_dev(bio, ca->bdev); in prio_io()
599 bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb); in prio_io()
602 bio->bi_private = ca; in prio_io()
604 bch_bio_map(bio, ca->disk_buckets); in prio_io()
606 closure_bio_submit(ca->set, bio, &ca->prio); in prio_io()
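prio_endio() and prio_io() above are the shared I/O path for priority buckets: prio_io() points a freshly allocated bbio at bucket * bucket_size sectors, sizes it to one meta bucket, maps ca->disk_buckets into it and submits it under the ca->prio closure; prio_endio() flags any error against the cache set, frees the bbio and puts that closure. A minimal sketch of prio_io() assembled from those lines; the stack-closure init, the bi_end_io/bi_opf assignments and the final wait are assumptions.

    static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf)
    {
            struct closure *cl = &ca->prio;
            struct bio *bio = bch_bbio_alloc(ca->set);

            closure_init_stack(cl);                         /* assumed */

            /* the target bucket is addressed in 512-byte sectors */
            bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
            bio_set_dev(bio, ca->bdev);
            bio->bi_iter.bi_size   = meta_bucket_bytes(&ca->sb);

            bio->bi_end_io  = prio_endio;                   /* assumed */
            bio->bi_private = ca;
            bio->bi_opf     = opf;                          /* caller passes REQ_OP_READ/WRITE */
            bch_bio_map(bio, ca->disk_buckets);

            closure_bio_submit(ca->set, bio, &ca->prio);
            closure_sync(cl);                               /* assumed: wait for prio_endio() */
    }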
610 int bch_prio_write(struct cache *ca, bool wait) in bch_prio_write() argument
617 fifo_used(&ca->free[RESERVE_PRIO]), in bch_prio_write()
618 fifo_used(&ca->free[RESERVE_NONE]), in bch_prio_write()
619 fifo_used(&ca->free_inc)); in bch_prio_write()
627 size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) + in bch_prio_write()
628 fifo_used(&ca->free[RESERVE_NONE]); in bch_prio_write()
629 if (prio_buckets(ca) > avail) in bch_prio_write()
635 lockdep_assert_held(&ca->set->bucket_lock); in bch_prio_write()
637 ca->disk_buckets->seq++; in bch_prio_write()
639 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), in bch_prio_write()
640 &ca->meta_sectors_written); in bch_prio_write()
642 for (i = prio_buckets(ca) - 1; i >= 0; --i) { in bch_prio_write()
644 struct prio_set *p = ca->disk_buckets; in bch_prio_write()
646 struct bucket_disk *end = d + prios_per_bucket(ca); in bch_prio_write()
648 for (b = ca->buckets + i * prios_per_bucket(ca); in bch_prio_write()
649 b < ca->buckets + ca->sb.nbuckets && d < end; in bch_prio_write()
655 p->next_bucket = ca->prio_buckets[i + 1]; in bch_prio_write()
656 p->magic = pset_magic(&ca->sb); in bch_prio_write()
657 p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8); in bch_prio_write()
659 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait); in bch_prio_write()
662 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
663 prio_io(ca, bucket, REQ_OP_WRITE); in bch_prio_write()
664 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
666 ca->prio_buckets[i] = bucket; in bch_prio_write()
667 atomic_dec_bug(&ca->buckets[bucket].pin); in bch_prio_write()
670 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
672 bch_journal_meta(ca->set, &cl); in bch_prio_write()
675 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
681 for (i = 0; i < prio_buckets(ca); i++) { in bch_prio_write()
682 if (ca->prio_last_buckets[i]) in bch_prio_write()
683 __bch_bucket_free(ca, in bch_prio_write()
684 &ca->buckets[ca->prio_last_buckets[i]]); in bch_prio_write()
686 ca->prio_last_buckets[i] = ca->prio_buckets[i]; in bch_prio_write()
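The bch_prio_write() references above cover the whole write-out of bucket priorities: under bucket_lock (see the lockdep assertion) it walks the prio buckets from the last to the first, packs priorities into ca->disk_buckets, chains each page to the previously written one via p->next_bucket, checksums it, allocates a fresh bucket from RESERVE_PRIO and writes it out with prio_io(). A condensed sketch of the central loop, paraphrased from the listed lines; the per-bucket prio/gen packing and the allocation-failure handling are assumptions.

    for (i = prio_buckets(ca) - 1; i >= 0; --i) {
            long bucket;
            struct prio_set *p = ca->disk_buckets;
            struct bucket_disk *d = p->data;
            struct bucket_disk *end = d + prios_per_bucket(ca);
            struct bucket *b;

            for (b = ca->buckets + i * prios_per_bucket(ca);
                 b < ca->buckets + ca->sb.nbuckets && d < end;
                 b++, d++) {
                    /* assumed: copy b's prio and gen into *d */
            }

            p->next_bucket = ca->prio_buckets[i + 1];       /* chain to the page written last iteration */
            p->magic       = pset_magic(&ca->sb);
            p->csum        = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);

            bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
            /* assumed: bail out if no bucket could be allocated */

            mutex_unlock(&ca->set->bucket_lock);
            prio_io(ca, bucket, REQ_OP_WRITE);
            mutex_lock(&ca->set->bucket_lock);

            ca->prio_buckets[i] = bucket;
            atomic_dec_bug(&ca->buckets[bucket].pin);
    }

After the loop the update is made durable with bch_journal_meta(), and only then are the buckets remembered in prio_last_buckets freed and replaced by the ones just written, so a crash in between still leaves a complete old chain on disk.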
691 static int prio_read(struct cache *ca, uint64_t bucket) in prio_read() argument
693 struct prio_set *p = ca->disk_buckets; in prio_read()
694 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; in prio_read()
699 for (b = ca->buckets; in prio_read()
700 b < ca->buckets + ca->sb.nbuckets; in prio_read()
703 ca->prio_buckets[bucket_nr] = bucket; in prio_read()
704 ca->prio_last_buckets[bucket_nr] = bucket; in prio_read()
707 prio_io(ca, bucket, REQ_OP_READ); in prio_read()
710 bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) { in prio_read()
715 if (p->magic != pset_magic(&ca->sb)) { in prio_read()
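prio_read() above walks the same chain in the opposite direction at startup: beginning at the bucket number recovered from the journal, it reads each priority page with prio_io(..., REQ_OP_READ), verifies the checksum and the pset magic, follows the chain to the next page, and refills the in-memory ca->buckets as it goes, also recording the visited pages in prio_buckets/prio_last_buckets so the next bch_prio_write() can free them. Note that d starts at end (see the initialiser on the listed line), so the very first pass through the loop already reads the first page. A sketch of that loop, paraphrased from the listed lines; the error returns and the prio/gen unpacking are assumptions.

    for (b = ca->buckets; b < ca->buckets + ca->sb.nbuckets; b++, d++) {
            if (d == end) {
                    ca->prio_buckets[bucket_nr]      = bucket;
                    ca->prio_last_buckets[bucket_nr] = bucket;
                    bucket_nr++;

                    prio_io(ca, bucket, REQ_OP_READ);

                    if (p->csum !=
                        bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8))
                            return -EIO;            /* assumed: bad checksum */

                    if (p->magic != pset_magic(&ca->sb))
                            return -EIO;            /* assumed: bad magic */

                    bucket = p->next_bucket;        /* assumed: follow the chain */
                    d      = p->data;
            }

            /* assumed: restore b's prio and gen from *d */
    }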
793 struct cache *ca = d->c->cache; in bcache_device_unlink() local
798 bd_unlink_disk_holder(ca->bdev, d->disk); in bcache_device_unlink()
805 struct cache *ca = c->cache; in bcache_device_link() local
808 bd_link_disk_holder(ca->bdev, d->disk); in bcache_device_link()
1675 struct cache *ca; in cache_set_free() local
1687 ca = c->cache; in cache_set_free()
1688 if (ca) { in cache_set_free()
1689 ca->set = NULL; in cache_set_free()
1691 kobject_put(&ca->kobj); in cache_set_free()
1716 struct cache *ca = c->cache; in cache_set_flush() local
1742 if (ca->alloc_thread) in cache_set_flush()
1743 kthread_stop(ca->alloc_thread); in cache_set_flush()
1859 struct cache *ca = container_of(sb, struct cache, sb); in bch_cache_set_alloc() local
1883 c->cache = ca; in bch_cache_set_alloc()
1974 struct cache *ca = c->cache; in run_cache_set() local
1981 c->nbuckets = ca->sb.nbuckets; in run_cache_set()
2001 if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev])) in run_cache_set()
2046 if (bch_cache_allocator_start(ca)) in run_cache_set()
2069 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, in run_cache_set()
2072 for (j = 0; j < ca->sb.keys; j++) in run_cache_set()
2073 ca->sb.d[j] = ca->sb.first_bucket + j; in run_cache_set()
2078 if (bch_cache_allocator_start(ca)) in run_cache_set()
2082 bch_prio_write(ca, true); in run_cache_set()
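The run_cache_set() references show both start-up paths. For an existing cache set the priorities are read back from the bucket recorded in the journal (j here is the journal's jset, as in the listed line) before the allocator thread starts; for a brand-new set the superblock's journal bucket list (sb.keys and sb.d[]) is carved out starting at first_bucket, the allocator starts, and an initial priority write is forced with bch_prio_write(ca, true). A sketch contrasting the two paths; the branch condition, the clamp bounds and the error label are assumptions.

    if (CACHE_SYNC(&ca->sb)) {                      /* assumed guard for the replay path */
            if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
                    goto err;                       /* label assumed */

            /* journal replay and btree checks omitted */

            if (bch_cache_allocator_start(ca))
                    goto err;
    } else {
            unsigned int j;                         /* loop index, distinct from the jset above */

            /* fresh cache set: reserve the journal buckets in the superblock */
            ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
                                  2, SB_JOURNAL_BUCKETS);   /* bounds assumed */
            for (j = 0; j < ca->sb.keys; j++)
                    ca->sb.d[j] = ca->sb.first_bucket + j;

            if (bch_cache_allocator_start(ca))
                    goto err;

            bch_prio_write(ca, true);
    }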
2146 static const char *register_cache_set(struct cache *ca) in register_cache_set() argument
2153 if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) { in register_cache_set()
2160 c = bch_cache_set_alloc(&ca->sb); in register_cache_set()
2176 sprintf(buf, "cache%i", ca->sb.nr_this_dev); in register_cache_set()
2177 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || in register_cache_set()
2178 sysfs_create_link(&c->kobj, &ca->kobj, buf)) in register_cache_set()
2181 kobject_get(&ca->kobj); in register_cache_set()
2182 ca->set = c; in register_cache_set()
2183 ca->set->cache = ca; in register_cache_set()
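register_cache_set() above either attaches the cache to an already-registered set whose set_uuid matches the cache superblock, or allocates a new set from that superblock with bch_cache_set_alloc(&ca->sb); in both cases it creates the "set"/"cacheN" sysfs symlinks, takes a reference on the cache kobject and makes the set and the cache point at each other. A short sketch of that flow; the list walked for the UUID match, the buffer size and the returned error strings are assumptions.

    struct cache_set *c;
    char buf[12];                                           /* size assumed */

    list_for_each_entry(c, &bch_cache_sets, list)           /* list name assumed */
            if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16))
                    goto found;

    c = bch_cache_set_alloc(&ca->sb);
    if (!c)
            return "cannot allocate memory";                /* message assumed */

    found:
    sprintf(buf, "cache%i", ca->sb.nr_this_dev);
    if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
        sysfs_create_link(&c->kobj, &ca->kobj, buf))
            return "error creating sysfs links";            /* message assumed */

    kobject_get(&ca->kobj);         /* the set now holds its own reference to the cache */
    ca->set = c;
    ca->set->cache = ca;

    return NULL;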
2200 struct cache *ca = container_of(kobj, struct cache, kobj); in bch_cache_release() local
2203 if (ca->set) { in bch_cache_release()
2204 BUG_ON(ca->set->cache != ca); in bch_cache_release()
2205 ca->set->cache = NULL; in bch_cache_release()
2208 free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb))); in bch_cache_release()
2209 kfree(ca->prio_buckets); in bch_cache_release()
2210 vfree(ca->buckets); in bch_cache_release()
2212 free_heap(&ca->heap); in bch_cache_release()
2213 free_fifo(&ca->free_inc); in bch_cache_release()
2216 free_fifo(&ca->free[i]); in bch_cache_release()
2218 if (ca->sb_disk) in bch_cache_release()
2219 put_page(virt_to_page(ca->sb_disk)); in bch_cache_release()
2221 if (!IS_ERR_OR_NULL(ca->bdev)) in bch_cache_release()
2222 blkdev_put(ca->bdev, ca); in bch_cache_release()
2224 kfree(ca); in bch_cache_release()
2228 static int cache_alloc(struct cache *ca) in cache_alloc() argument
2237 kobject_init(&ca->kobj, &bch_cache_ktype); in cache_alloc()
2239 bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0); in cache_alloc()
2250 btree_buckets = ca->sb.njournal_buckets ?: 8; in cache_alloc()
2251 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; in cache_alloc()
2258 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, in cache_alloc()
2264 if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), in cache_alloc()
2270 if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) { in cache_alloc()
2275 if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) { in cache_alloc()
2280 if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) { in cache_alloc()
2285 if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) { in cache_alloc()
2290 ca->buckets = vzalloc(array_size(sizeof(struct bucket), in cache_alloc()
2291 ca->sb.nbuckets)); in cache_alloc()
2292 if (!ca->buckets) { in cache_alloc()
2297 ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t), in cache_alloc()
2298 prio_buckets(ca), 2), in cache_alloc()
2300 if (!ca->prio_buckets) { in cache_alloc()
2305 ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb); in cache_alloc()
2306 if (!ca->disk_buckets) { in cache_alloc()
2311 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); in cache_alloc()
2313 for_each_bucket(b, ca) in cache_alloc()
2318 kfree(ca->prio_buckets); in cache_alloc()
2320 vfree(ca->buckets); in cache_alloc()
2322 free_heap(&ca->heap); in cache_alloc()
2324 free_fifo(&ca->free_inc); in cache_alloc()
2326 free_fifo(&ca->free[RESERVE_NONE]); in cache_alloc()
2328 free_fifo(&ca->free[RESERVE_MOVINGGC]); in cache_alloc()
2330 free_fifo(&ca->free[RESERVE_PRIO]); in cache_alloc()
2332 free_fifo(&ca->free[RESERVE_BTREE]); in cache_alloc()
2337 pr_notice("error %pg: %s\n", ca->bdev, err); in cache_alloc()
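cache_alloc() above performs all per-cache allocations in one place: the reserve FIFOs (btree, prio, moving GC, none), the free_inc FIFO, the bucket heap, the in-memory bucket array, the prio bucket bookkeeping (where the second half of the array doubles as prio_last_buckets) and the disk_buckets scratch pages, with an error path that frees them in reverse order, mirroring bch_cache_release(). A condensed sketch of the allocation sequence; the sizes repeat the expressions in the listed lines, while the error-label names and the GFP flags on the truncated lines are assumptions (taken to match the neighbouring GFP_KERNEL calls).

    btree_buckets = ca->sb.njournal_buckets ?: 8;
    free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

    if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL))
            goto err_btree_alloc;                           /* labels assumed */
    if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL))
            goto err_prio_alloc;
    if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL))
            goto err_movinggc_alloc;
    if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL))
            goto err_none_alloc;
    if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL))
            goto err_free_inc_alloc;
    if (!init_heap(&ca->heap, free << 3, GFP_KERNEL))
            goto err_heap_alloc;

    ca->buckets = vzalloc(array_size(sizeof(struct bucket), ca->sb.nbuckets));
    if (!ca->buckets)
            goto err_buckets_alloc;

    ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t), prio_buckets(ca), 2),
                               GFP_KERNEL);
    if (!ca->prio_buckets)
            goto err_prio_buckets_alloc;

    ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
    if (!ca->disk_buckets)
            goto err_disk_buckets_alloc;

    /* the second half of prio_buckets doubles as prio_last_buckets */
    ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

The unwind labels at the bottom of the listed function then release these objects in reverse (kfree(ca->prio_buckets), vfree(ca->buckets), free_heap(), one free_fifo() per reserve) before reporting the failure with the pr_notice() shown above.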
2342 struct block_device *bdev, struct cache *ca) in register_cache() argument
2347 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); in register_cache()
2348 ca->bdev = bdev; in register_cache()
2349 ca->sb_disk = sb_disk; in register_cache()
2352 ca->discard = CACHE_DISCARD(&ca->sb); in register_cache()
2354 ret = cache_alloc(ca); in register_cache()
2362 blkdev_put(bdev, ca); in register_cache()
2372 if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) { in register_cache()
2379 err = register_cache_set(ca); in register_cache()
2387 pr_info("registered cache device %pg\n", ca->bdev); in register_cache()
2390 kobject_put(&ca->kobj); in register_cache()
2394 pr_notice("error %pg: %s\n", ca->bdev, err); in register_cache()
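register_cache() above ties the probe path together: it copies the parsed superblock into ca->sb, takes over the block device and the on-disk superblock page, mirrors the discard flag, runs cache_alloc(), exposes the cache under the bdev's kobject and finally joins or creates a cache set through register_cache_set(). The listed lines also show the asymmetric cleanup: if cache_alloc() fails the bdev reference is dropped explicitly with blkdev_put(), while once the kobject is live, teardown goes through the kobject_put() near the end, which releases everything via bch_cache_release(). A condensed sketch of the flow; the discard guard, error strings and return codes are assumptions.

    memcpy(&ca->sb, sb, sizeof(struct cache_sb));
    ca->bdev    = bdev;
    ca->sb_disk = sb_disk;

    /* assumed: a discard-capability check on the bdev guards this assignment */
    ca->discard = CACHE_DISCARD(&ca->sb);

    ret = cache_alloc(ca);
    if (ret != 0) {
            /* ca->kobj is not live yet, so drop the bdev reference by hand */
            blkdev_put(bdev, ca);
            err = "cache_alloc() failed";           /* message assumed */
            goto err;                               /* label assumed */
    }

    if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) {
            err = "error calling kobject_add";      /* message assumed */
            ret = -ENOMEM;
            goto out;
    }

    err = register_cache_set(ca);
    if (err) {
            ret = -ENODEV;                          /* value assumed */
            goto out;
    }

    pr_info("registered cache device %pg\n", ca->bdev);

    out:
            kobject_put(&ca->kobj);
    err:
            if (err)
                    pr_notice("error %pg: %s\n", ca->bdev, err);
            return ret;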
2431 struct cache *ca = c->cache; in bch_is_open_cache() local
2433 if (ca->bdev->bd_dev == dev) in bch_is_open_cache()