Lines matching references to the identifier `ca` (struct cache *) in drivers/md/bcache/alloc.c, the bcache allocator, grouped by the function or macro each line belongs to:

In bch_inc_gen() (ca: argument):
  76  uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
  80          ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
  81          WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
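
Each bucket carries an 8-bit generation; pointers into the cache embed the generation they were created with, so bumping b->gen is what actually invalidates stale pointers. need_gc tracks how far any bucket has drifted since the last GC, and the WARN fires if that drift approaches wraparound. A minimal userspace sketch of the scheme, assuming bucket_gc_gen() is the 8-bit distance between the current generation and the one recorded at the last GC:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for the bcache structures; illustration only. */
    struct bucket { uint8_t gen, last_gc; };
    struct cache_set { uint8_t need_gc; };

    /* Wraparound-safe 8-bit distance since the last GC saw this bucket. */
    static uint8_t bucket_gc_gen(struct bucket *b)
    {
            return (uint8_t)(b->gen - b->last_gc);
    }

    static uint8_t inc_gen(struct cache_set *set, struct bucket *b)
    {
            uint8_t ret = ++b->gen;

            /* Track the worst drift so GC runs before gens get ambiguous. */
            if (bucket_gc_gen(b) > set->need_gc)
                    set->need_gc = bucket_gc_gen(b);
            return ret;
    }

    int main(void)
    {
            struct cache_set set = { 0 };
            struct bucket b = { .gen = 250, .last_gc = 250 };

            for (int i = 0; i < 10; i++)
                    inc_gen(&set, &b);

            /* gen wrapped past 255, but the distance is still a clean 10. */
            printf("gen=%u gc_gen=%u need_gc=%u\n",
                   b.gen, bucket_gc_gen(&b), set.need_gc);
            return 0;
    }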

In bch_rescale_priorities() (ca: local):
  88          struct cache *ca;
 106          ca = c->cache;
 107          for_each_bucket(b, ca)
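
Only the walk over every bucket is visible here; the loop body (elided) rescales each bucket's priority downward so that prio behaves as an approximate recency clock for the LRU policy below. A hedged sketch of such a decay pass, with hypothetical names and an assumed sentinel convention (prio 0 = never used, a max value pins btree buckets):

    #include <stdint.h>
    #include <stddef.h>

    #define BTREE_PRIO UINT16_MAX   /* hypothetical sentinel for btree buckets */

    struct bucket { uint16_t prio; };

    /* Decay every bucket's priority by one step; recently used buckets
     * get their prio reset elsewhere, so over many passes cold buckets
     * sink toward 0 and sort first in the eviction heap. */
    static void rescale_priorities(struct bucket *buckets, size_t n)
    {
            for (size_t i = 0; i < n; i++) {
                    struct bucket *b = &buckets[i];

                    if (b->prio && b->prio != BTREE_PRIO)
                            b->prio--;
            }
    }

    int main(void)
    {
            struct bucket bs[3] = { { 0 }, { 5 }, { BTREE_PRIO } };

            rescale_priorities(bs, 3);   /* bs[1].prio is now 4; others pinned */
            return 0;
    }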

In bch_can_invalidate_bucket() (ca: argument):
 130  bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
 132          BUG_ON(!ca->set->gc_mark_valid);

In __bch_invalidate_one_bucket() (ca: argument):
 140  void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
 142          lockdep_assert_held(&ca->set->bucket_lock);
 146          trace_bcache_invalidate(ca, b - ca->buckets);
 148          bch_inc_gen(ca, b);

In bch_invalidate_one_bucket() (ca: argument):
 153  static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
 155          __bch_invalidate_one_bucket(ca, b);
 157          fifo_push(&ca->free_inc, b - ca->buckets);

In the bucket_prio() macro:
 171          unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;   \
 173          (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);    \
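
These two lines are the heart of the eviction key: a bucket's priority is offset so it never underflows past the set-wide minimum, then scaled by how many sectors the bucket still holds. Lower keys are evicted first, so cold and mostly-empty buckets go before hot or full ones. A worked example with made-up bucket values (INITIAL_PRIO and the /8 fudge factor come from the lines above):

    #include <stdio.h>

    #define INITIAL_PRIO 32768u

    int main(void)
    {
            unsigned set_min_prio = 30000;  /* lowest prio across the cache set */
            unsigned min_prio = (INITIAL_PRIO - set_min_prio) / 8;   /* = 346 */

            /* Two candidate buckets: one cold and nearly empty, one warm
             * and full. The prio and sector counts are invented. */
            unsigned key_cold = (30050 - set_min_prio + min_prio) * 2;   /* 792 */
            unsigned key_warm = (31000 - set_min_prio + min_prio) * 512; /* 689152 */

            /* The min-heap pops the smaller key first: the cold, empty
             * bucket is invalidated long before the warm, full one. */
            printf("cold=%u warm=%u\n", key_cold, key_warm);
            return 0;
    }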

In invalidate_buckets_lru() (ca: argument):
 179  static void invalidate_buckets_lru(struct cache *ca)
 184          ca->heap.used = 0;
 186          for_each_bucket(b, ca) {
 187                  if (!bch_can_invalidate_bucket(ca, b))
 190                  if (!heap_full(&ca->heap))
 191                          heap_add(&ca->heap, b, bucket_max_cmp);
 192                  else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
 193                          ca->heap.data[0] = b;
 194                          heap_sift(&ca->heap, 0, bucket_max_cmp);
 198          for (i = ca->heap.used / 2 - 1; i >= 0; --i)
 199                  heap_sift(&ca->heap, i, bucket_min_cmp);
 201          while (!fifo_full(&ca->free_inc)) {
 202                  if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
 207                          ca->invalidate_needs_gc = 1;
 208                          wake_up_gc(ca->set);
 212                  bch_invalidate_one_bucket(ca, b);
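
The pattern here is bounded top-k selection: one pass over all buckets keeps the k best (lowest-key) candidates in a max-heap, whose root is the worst of the kept set and is displaced whenever a better candidate appears; the heap is then re-sifted into min-heap order so candidates pop coldest-first. A self-contained sketch of the same two-pass technique on plain ints:

    #include <stdio.h>

    #define K 4   /* keep the 4 smallest keys, like a small ca->heap */

    static void sift(int *h, int n, int i, int (*cmp)(int, int))
    {
            for (;;) {
                    int l = 2 * i + 1, r = l + 1, m = i;

                    if (l < n && cmp(h[l], h[m])) m = l;
                    if (r < n && cmp(h[r], h[m])) m = r;
                    if (m == i) break;
                    int t = h[i]; h[i] = h[m]; h[m] = t;
                    i = m;
            }
    }

    static int max_cmp(int a, int b) { return a > b; } /* max-heap order */
    static int min_cmp(int a, int b) { return a < b; } /* min-heap order */

    int main(void)
    {
            int keys[] = { 9, 3, 7, 1, 8, 2, 6, 5 }, heap[K], used = 0;

            /* Pass 1: keep the K smallest keys. The max-heap root is the
             * worst of the kept set; any smaller key displaces it. */
            for (unsigned i = 0; i < sizeof(keys) / sizeof(*keys); i++) {
                    if (used < K) {
                            heap[used++] = keys[i];
                            for (int j = used / 2 - 1; j >= 0; --j)
                                    sift(heap, used, j, max_cmp);
                    } else if (keys[i] < heap[0]) {
                            heap[0] = keys[i];
                            sift(heap, used, 0, max_cmp);
                    }
            }

            /* Pass 2: re-heapify as a min-heap and pop smallest-first,
             * the way the coldest buckets are invalidated first. */
            for (int j = used / 2 - 1; j >= 0; --j)
                    sift(heap, used, j, min_cmp);
            while (used) {
                    printf("%d ", heap[0]);
                    heap[0] = heap[--used];
                    sift(heap, used, 0, min_cmp);
            }
            printf("\n");   /* prints: 1 2 3 5 */
            return 0;
    }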

In invalidate_buckets_fifo() (ca: argument):
 216  static void invalidate_buckets_fifo(struct cache *ca)
 221          while (!fifo_full(&ca->free_inc)) {
 222                  if (ca->fifo_last_bucket < ca->sb.first_bucket ||
 223                      ca->fifo_last_bucket >= ca->sb.nbuckets)
 224                          ca->fifo_last_bucket = ca->sb.first_bucket;
 226                  b = ca->buckets + ca->fifo_last_bucket++;
 228                  if (bch_can_invalidate_bucket(ca, b))
 229                          bch_invalidate_one_bucket(ca, b);
 231                  if (++checked >= ca->sb.nbuckets) {
 232                          ca->invalidate_needs_gc = 1;
 233                          wake_up_gc(ca->set);
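
FIFO replacement is a wraparound cursor: fifo_last_bucket resumes wherever the previous refill stopped, wraps back to first_bucket when it runs off the end, and the checked counter bounds the scan to one full pass so the thread gives up and wakes GC instead of spinning. The same cursor pattern in a standalone sketch (slot layout and the usable() predicate are invented):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define FIRST  1        /* slot 0 reserved, like sb.first_bucket */
    #define NSLOTS 8

    static size_t cursor = FIRST;
    static bool claimed[NSLOTS];

    /* Invented predicate standing in for bch_can_invalidate_bucket(). */
    static bool usable(size_t i)
    {
            return i % 3 == 0 && !claimed[i];
    }

    static bool next_usable(size_t *out)
    {
            /* Bound the scan to one full pass, then report failure so the
             * caller can wake GC rather than loop forever. */
            for (size_t checked = 0; checked < NSLOTS; checked++) {
                    if (cursor < FIRST || cursor >= NSLOTS)
                            cursor = FIRST;         /* wrap the cursor */

                    size_t i = cursor++;
                    if (usable(i)) {
                            claimed[i] = true;
                            *out = i;
                            return true;
                    }
            }
            return false;
    }

    int main(void)
    {
            size_t i;

            while (next_usable(&i))
                    printf("claimed slot %zu\n", i);   /* 3, then 6 */
            return 0;
    }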

In invalidate_buckets_random() (ca: argument):
 239  static void invalidate_buckets_random(struct cache *ca)
 244          while (!fifo_full(&ca->free_inc)) {
 249                  n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
 250                  n += ca->sb.first_bucket;
 252                  b = ca->buckets + n;
 254                  if (bch_can_invalidate_bucket(ca, b))
 255                          bch_invalidate_one_bucket(ca, b);
 257                  if (++checked >= ca->sb.nbuckets / 2) {
 258                          ca->invalidate_needs_gc = 1;
 259                          wake_up_gc(ca->set);
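
Random replacement draws a raw random value, folds it into the valid bucket range with a modulo plus the first_bucket offset, and bounds the probing at nbuckets/2 attempts before falling back to GC. The index mapping in isolation (the "invalidatable" test is a stand-in, so either branch may print):

    #include <stdio.h>
    #include <stdlib.h>

    #define FIRST    1
    #define NBUCKETS 1024

    int main(void)
    {
            srand(42);   /* deterministic for the example */

            for (int checked = 0; checked < NBUCKETS / 2; checked++) {
                    size_t n = (size_t)rand();

                    /* Map a raw random value into the valid bucket range,
                     * exactly like the modulo + offset lines above. */
                    n %= (size_t)(NBUCKETS - FIRST);
                    n += FIRST;

                    if (n % 257 == 0) {     /* stand-in usability test */
                            printf("picked bucket %zu\n", n);
                            return 0;
                    }
            }
            printf("gave up after %d probes; would wake GC\n", NBUCKETS / 2);
            return 0;
    }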

In invalidate_buckets() (ca: argument):
 265  static void invalidate_buckets(struct cache *ca)
 267          BUG_ON(ca->invalidate_needs_gc);
 269          switch (CACHE_REPLACEMENT(&ca->sb)) {
 271                  invalidate_buckets_lru(ca);
 274                  invalidate_buckets_fifo(ca);
 277                  invalidate_buckets_random(ca);

In the allocator_wait() macro (ca: argument):
 282  #define allocator_wait(ca, cond)                                        \
 289                  mutex_unlock(&(ca)->set->bucket_lock);                  \
 291                      test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {  \
 297                  mutex_lock(&(ca)->set->bucket_lock);                    \
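
allocator_wait() is a hand-rolled wait_event(): re-check a condition under bucket_lock, drop the lock while sleeping, and bail out if the thread is being stopped or CACHE_SET_IO_DISABLE is set. A userspace analogue of the same pattern using a pthread condition variable (all names invented; the kernel version sets the task state and schedules instead of waiting on a condvar):

    #include <pthread.h>
    #include <stdbool.h>

    struct alloc_state {
            pthread_mutex_t bucket_lock;
            pthread_cond_t  wakeup;
            bool            io_disabled;   /* like CACHE_SET_IO_DISABLE */
            int             free_slots;
    };

    /* Returns false if woken by shutdown rather than by progress. */
    static bool allocator_wait_free_slot(struct alloc_state *s)
    {
            pthread_mutex_lock(&s->bucket_lock);
            while (s->free_slots == 0) {
                    if (s->io_disabled) {           /* shutdown: stop waiting */
                            pthread_mutex_unlock(&s->bucket_lock);
                            return false;
                    }
                    /* Drops bucket_lock while asleep and retakes it before
                     * returning, mirroring the unlock/schedule/lock dance
                     * in the macro. */
                    pthread_cond_wait(&s->wakeup, &s->bucket_lock);
            }
            s->free_slots--;
            pthread_mutex_unlock(&s->bucket_lock);
            return true;
    }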

In bch_allocator_push() (ca: argument):
 302  static int bch_allocator_push(struct cache *ca, long bucket)
 307          if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
 311                  if (fifo_push(&ca->free[i], bucket))
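
Freed buckets are distributed across per-purpose reserve fifos: the prio reserve is topped up first (the allocator itself needs buckets to write allocation metadata), then the remaining reserves are tried in order, returning success on the first fifo with room. A sketch of that fill order; the reserve set follows the bcache naming but the fifo size and exact ordering are assumptions:

    #include <stdbool.h>
    #include <stddef.h>

    enum { RESERVE_BTREE, RESERVE_PRIO, RESERVE_MOVINGGC, RESERVE_NONE, RESERVE_NR };

    struct fifo { long data[4]; size_t used; };

    static bool fifo_push(struct fifo *f, long v)
    {
            if (f->used == sizeof(f->data) / sizeof(*f->data))
                    return false;
            f->data[f->used++] = v;
            return true;
    }

    static bool allocator_push(struct fifo freelists[RESERVE_NR], long bucket)
    {
            /* Prio buckets are refilled first: without them the allocator
             * cannot persist prios/gens for anything else. */
            if (fifo_push(&freelists[RESERVE_PRIO], bucket))
                    return true;

            for (int i = 0; i < RESERVE_NR; i++)
                    if (fifo_push(&freelists[i], bucket))
                            return true;

            return false;   /* everything full: caller waits in allocator_wait() */
    }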

In bch_allocator_thread() (ca: local):
 319          struct cache *ca = arg;
 321          mutex_lock(&ca->set->bucket_lock);
 332                  if (!fifo_pop(&ca->free_inc, bucket))
 335                  if (ca->discard) {
 336                          mutex_unlock(&ca->set->bucket_lock);
 337                          blkdev_issue_discard(ca->bdev,
 338                                  bucket_to_sector(ca->set, bucket),
 339                                  ca->sb.bucket_size, GFP_KERNEL);
 340                          mutex_lock(&ca->set->bucket_lock);
 343                  allocator_wait(ca, bch_allocator_push(ca, bucket));
 344                  wake_up(&ca->set->btree_cache_wait);
 345                  wake_up(&ca->set->bucket_wait);
 355                  allocator_wait(ca, ca->set->gc_mark_valid &&
 356                                 !ca->invalidate_needs_gc);
 357                  invalidate_buckets(ca);
 363                  allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
 364                  if (CACHE_SYNC(&ca->sb)) {
 376                          if (!fifo_full(&ca->free_inc))
 379                          if (bch_prio_write(ca, false) < 0) {
 380                                  ca->invalidate_needs_gc = 1;
 381                                  wake_up_gc(ca->set);
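
Read together, these fragments give the thread's main loop: (1) pop each invalidated bucket off free_inc, optionally discarding it with bucket_lock dropped around the blocking I/O, push it onto a reserve fifo, and wake allocation waiters; (2) once free_inc drains, wait for valid GC marks and refill it via the replacement policy; (3) in sync mode, persist the new prios/gens via bch_prio_write() before buckets are reused, kicking GC on failure. A single-threaded toy of the drain/refill cadence (structures invented; locking and discard omitted):

    #include <stdbool.h>
    #include <stdio.h>

    #define CAP 4

    struct fifo { long d[CAP]; int n; };

    static bool pop(struct fifo *f, long *v)
    {
            if (!f->n)
                    return false;
            *v = f->d[0];
            for (int i = 1; i < f->n; i++)
                    f->d[i - 1] = f->d[i];
            f->n--;
            return true;
    }

    static bool push(struct fifo *f, long v)
    {
            if (f->n == CAP)
                    return false;
            f->d[f->n++] = v;
            return true;
    }

    int main(void)
    {
            struct fifo free_inc = { { 0 }, 0 }, reserve = { { 0 }, 0 };
            long next = 100, b;

            for (int round = 0; round < 2; round++) {
                    /* Phase 1: drain free_inc into the reserve fifo; the
                     * real thread discards first and wakes waiters. */
                    while (pop(&free_inc, &b))
                            if (!push(&reserve, b))
                                    break;

                    /* Phase 2: refill free_inc; stands in for
                     * invalidate_buckets() + bch_prio_write(). */
                    while (push(&free_inc, next))
                            next++;
            }
            printf("reserve holds %d buckets\n", reserve.n);
            return 0;
    }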

In bch_bucket_alloc() (ca: argument):
 392  long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
 400          if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
 404          if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
 405              fifo_pop(&ca->free[reserve], r))
 409                  trace_bcache_alloc_fail(ca, reserve);
 414                  prepare_to_wait(&ca->set->bucket_wait, &w,
 417                  mutex_unlock(&ca->set->bucket_lock);
 419                  mutex_lock(&ca->set->bucket_lock);
 420          } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
 421                   !fifo_pop(&ca->free[reserve], r));
 423          finish_wait(&ca->set->bucket_wait, &w);
 425          if (ca->alloc_thread)
 426                  wake_up_process(ca->alloc_thread);
 428          trace_bcache_alloc(ca, reserve);
 430          if (expensive_debug_checks(ca->set)) {
 435                  for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
 436                          BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
 439                  fifo_for_each(i, &ca->free[j], iter)
 441                  fifo_for_each(i, &ca->free_inc, iter)
 445          b = ca->buckets + r;
 449          SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
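
The fast path tries the common pool (RESERVE_NONE) before the caller's dedicated reserve, which keeps the special-purpose reserves full; on failure the caller either returns immediately (wait == false) or parks on bucket_wait in a standard prepare_to_wait()/schedule() loop, then wakes the allocator thread to refill. The reserve-ordering trick in isolation (fifo sizes and contents invented):

    #include <stdbool.h>
    #include <stdio.h>

    enum { RESERVE_BTREE, RESERVE_PRIO, RESERVE_MOVINGGC, RESERVE_NONE, RESERVE_NR };

    struct fifo { long d[8]; int n; };

    static bool fifo_pop(struct fifo *f, long *v)
    {
            if (!f->n)
                    return false;
            *v = f->d[--f->n];
            return true;
    }

    static bool bucket_alloc(struct fifo freelists[RESERVE_NR], int reserve, long *r)
    {
            /* Common pool first, dedicated reserve only as a fallback.
             * On failure the real code sleeps on bucket_wait and wakes
             * the allocator thread; see the allocator_wait sketch. */
            return fifo_pop(&freelists[RESERVE_NONE], r) ||
                   fifo_pop(&freelists[reserve], r);
    }

    int main(void)
    {
            struct fifo freelists[RESERVE_NR] = {
                    [RESERVE_BTREE] = { { 1, 2 }, 2 },   /* dedicated reserve */
                    [RESERVE_NONE]  = { { 7 },    1 },   /* common pool */
            };
            long r;

            bucket_alloc(freelists, RESERVE_BTREE, &r);
            printf("first alloc: %ld\n", r);    /* 7: common pool drained first */
            bucket_alloc(freelists, RESERVE_BTREE, &r);
            printf("second alloc: %ld\n", r);   /* 2: now the reserve is tapped */
            return 0;
    }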

In __bch_bucket_free() (ca: argument):
 469  void __bch_bucket_free(struct cache *ca, struct bucket *b)
 474          if (ca->set->avail_nbuckets < ca->set->nbuckets) {
 475                  ca->set->avail_nbuckets++;
 476                  bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);

In __bch_bucket_alloc_set() (ca: local):
 491          struct cache *ca;
 502          ca = c->cache;
 503          b = bch_bucket_alloc(ca, reserve, wait);
 507          k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
 509                               ca->sb.nr_this_dev);
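
The allocated bucket becomes a key pointer: MAKE_PTR() packs the bucket's current generation, its starting sector, and the device id into a single 64-bit value, which is how later reads can detect a reused (re-gen'd) bucket. A hedged illustration of the packing idea; the field widths below are invented, not bcache's actual on-disk layout:

    #include <stdint.h>
    #include <stdio.h>

    #define GEN_BITS 8
    #define DEV_BITS 12
    #define OFF_BITS (64 - GEN_BITS - DEV_BITS)

    static uint64_t make_ptr(uint8_t gen, uint64_t offset, uint16_t dev)
    {
            /* gen in the top bits, sector offset in the middle, device
             * id in the low bits (widths are illustrative). */
            return ((uint64_t)gen << (64 - GEN_BITS)) |
                   (offset << DEV_BITS) |
                   dev;
    }

    int main(void)
    {
            uint64_t p = make_ptr(42, 123456, 3);

            printf("gen=%llu off=%llu dev=%llu\n",
                   (unsigned long long)(p >> (64 - GEN_BITS)),
                   (unsigned long long)((p >> DEV_BITS) & ((1ULL << OFF_BITS) - 1)),
                   (unsigned long long)(p & ((1ULL << DEV_BITS) - 1)));
            return 0;
    }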

In bch_cache_allocator_start() (ca: argument):
 727  int bch_cache_allocator_start(struct cache *ca)
 730                                              ca, "bcache_allocator");
 734          ca->alloc_thread = k;
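
These fragments are the standard kthread bring-up: kthread_run() starts bch_allocator_thread with the cache as its argument, the error pointer is checked, and the task pointer is stashed in ca->alloc_thread so bch_bucket_alloc() can wake_up_process() it later. A sketch of how the elided lines likely fit together (a reconstruction from the visible fragments, not verbatim kernel source):

    int bch_cache_allocator_start(struct cache *ca)
    {
            struct task_struct *k = kthread_run(bch_allocator_thread,
                                                ca, "bcache_allocator");
            if (IS_ERR(k))
                    return PTR_ERR(k);

            ca->alloc_thread = k;
            return 0;
    }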