Searched refs:from_cblock (Results 1 – 4 of 4) sorted by relevance
/linux-5.19.10/drivers/md/ |
D | dm-cache-metadata.c |
      703  disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));  in __commit_transaction()
      893  r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);  in block_clean_combined_dirty()
      919  (unsigned long long) from_cblock(begin));  in blocks_are_clean_combined_dirty()
      923  begin = to_cblock(from_cblock(begin) + 1);  in blocks_are_clean_combined_dirty()
      937  if (from_cblock(cmd->cache_blocks) == 0)  in blocks_are_clean_separate_dirty()
      942  from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);  in blocks_are_clean_separate_dirty()
      948  r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin));  in blocks_are_clean_separate_dirty()
      963  (unsigned long long) from_cblock(begin));  in blocks_are_clean_separate_dirty()
      969  begin = to_cblock(from_cblock(begin) + 1);  in blocks_are_clean_separate_dirty()
     1055  if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {  in dm_cache_resize()
     [all …]
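
Most of the metadata hits above share two idioms: cache blocks are walked by unwrapping with from_cblock(), incrementing, and re-wrapping with to_cblock() (the blocks_are_clean_combined_dirty() and blocks_are_clean_separate_dirty() hits), and sizes are compared on their unwrapped values before a resize (the dm_cache_resize() hit). Below is a minimal user-space sketch of both idioms, not kernel code: dm_cblock_t is reduced to a plain typedef, and struct cache_metadata, blocks_are_clean() and never_dirty() are made-up stand-ins trimmed to the fields these hits touch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dm_cblock_t;                       /* kernel version is sparse-annotated */
static inline dm_cblock_t to_cblock(uint32_t b)   { return (dm_cblock_t) b; }
static inline uint32_t from_cblock(dm_cblock_t b) { return (uint32_t) b; }

struct cache_metadata {                             /* illustrative stand-in, not the kernel struct */
	dm_cblock_t cache_blocks;
};

/* Shrinking is rejected, mirroring the dm_cache_resize() hit above. */
int cache_resize(struct cache_metadata *cmd, dm_cblock_t new_cache_size)
{
	if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
		fprintf(stderr, "unable to shrink the cache\n");
		return -1;
	}
	cmd->cache_blocks = new_cache_size;
	return 0;
}

/* Walk [0, cache_blocks) with the unwrap/increment/re-wrap step used in
 * the blocks_are_clean_*_dirty() hits. */
bool blocks_are_clean(struct cache_metadata *cmd, bool (*dirty)(dm_cblock_t))
{
	dm_cblock_t begin = to_cblock(0);

	while (from_cblock(begin) < from_cblock(cmd->cache_blocks)) {
		if (dirty(begin)) {
			printf("cache block %llu is dirty\n",
			       (unsigned long long) from_cblock(begin));
			return false;
		}
		begin = to_cblock(from_cblock(begin) + 1);
	}
	return true;
}

static bool never_dirty(dm_cblock_t b) { (void) b; return false; }

int main(void)
{
	struct cache_metadata cmd = { .cache_blocks = to_cblock(8) };

	printf("clean: %d\n", blocks_are_clean(&cmd, never_dirty));
	printf("grow to 16: %d\n", cache_resize(&cmd, to_cblock(16)));
	printf("shrink to 4: %d\n", cache_resize(&cmd, to_cblock(4)));
	return 0;
}
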
D | dm-cache-policy-smq.c |
      999  if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {  in requeue()
     1109  clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));  in end_cache_period()
     1129  return from_cblock(mq->cache_size) * p / 100u;  in percent_to_target()
     1155  nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;  in free_target_met()
     1465  from_cblock(work->cblock));  in __complete_background_work()
     1521  struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));  in __smq_set_clear_dirty()
     1554  return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);  in random_level()
     1564  e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));  in smq_load_mapping()
     1582  struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));  in smq_invalidate_mapping()
     1597  struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));  in smq_get_hint()
     [all …]
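
In the smq policy hits the unwrapped value mostly serves as a plain index: into the per-period hit bitset (requeue(), end_cache_period()), into the entry allocator (get_entry(), alloc_particular_entry()), or as hash input (random_level()), plus simple arithmetic on the unwrapped cache size (percent_to_target(), free_target_met()). A rough user-space sketch of that indexing pattern follows; struct mini_policy and record_hit() are invented stand-ins, and the kernel bitset helpers are replaced by a one-byte-per-block array.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t dm_cblock_t;                       /* kernel version is sparse-annotated */
static inline dm_cblock_t to_cblock(uint32_t b)   { return (dm_cblock_t) b; }
static inline uint32_t from_cblock(dm_cblock_t b) { return (uint32_t) b; }

#define CACHE_SIZE 64u

struct mini_policy {                                /* illustrative stand-in for the policy state */
	dm_cblock_t cache_size;
	uint8_t cache_hit_bits[CACHE_SIZE];         /* one byte per block instead of a kernel bitset */
};

/* As in the requeue() hit: unwrap the block number before using it as an
 * index, and only count the first hit per period. */
void record_hit(struct mini_policy *mq, dm_cblock_t cblock)
{
	uint32_t idx = from_cblock(cblock);

	if (!mq->cache_hit_bits[idx]) {
		mq->cache_hit_bits[idx] = 1;
		printf("first hit on cblock %u this period\n", idx);
	}
}

/* As in the end_cache_period() hit: clear hit state for the whole cache,
 * whose size is again obtained by unwrapping. */
void end_cache_period(struct mini_policy *mq)
{
	memset(mq->cache_hit_bits, 0, from_cblock(mq->cache_size));
}

/* As in the percent_to_target() hit: sizing targets from the unwrapped size. */
unsigned percent_to_target(const struct mini_policy *mq, unsigned p)
{
	return from_cblock(mq->cache_size) * p / 100u;
}

int main(void)
{
	struct mini_policy mq = { .cache_size = to_cblock(CACHE_SIZE) };

	record_hit(&mq, to_cblock(3));
	record_hit(&mq, to_cblock(3));              /* second hit is not counted again */
	end_cache_period(&mq);
	printf("25%% target: %u blocks\n", percent_to_target(&mq, 25));
	return 0;
}
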
D | dm-cache-target.c |
      606  return test_bit(from_cblock(b), cache->dirty_bitset);  in is_dirty()
      611  if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {  in set_dirty()
      623  if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset))  in force_set_dirty()
      630  if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {  in force_clear_dirty()
      720  sector_t block = from_cblock(cblock);  in remap_to_cache()
     1092  c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;  in copy()
     2330  dm_block_t nr_blocks = from_cblock(size);  in set_cache_size()
     2459  cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));  in cache_create()
     2464  clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));  in cache_create()
     2658  r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);  in write_dirty_bitset()
     [all …]
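
The target hits fall into two buckets: the unwrapped cache block number indexes (or sizes) the dirty bitset (is_dirty(), set_dirty(), force_set_dirty(), force_clear_dirty(), cache_create()), and it is multiplied by sectors_per_block to place I/O on the cache device (remap_to_cache(), copy()). A compressed user-space sketch of both uses, with struct mini_cache as a made-up stand-in for the target's cache struct and the atomic kernel bitops replaced by plain array accesses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dm_cblock_t;                       /* kernel version is sparse-annotated */
typedef uint64_t sector_t;
static inline dm_cblock_t to_cblock(uint32_t b)   { return (dm_cblock_t) b; }
static inline uint32_t from_cblock(dm_cblock_t b) { return (uint32_t) b; }

#define CACHE_BLOCKS 32u

struct mini_cache {                                 /* illustrative stand-in */
	dm_cblock_t cache_size;
	sector_t sectors_per_block;
	uint8_t dirty_bitset[CACHE_BLOCKS];         /* non-atomic stand-in for the kernel bitset */
	unsigned nr_dirty;
};

/* As in the is_dirty()/set_dirty() hits: the unwrapped block number indexes
 * the dirty bitset. */
bool is_dirty(struct mini_cache *cache, dm_cblock_t b)
{
	return cache->dirty_bitset[from_cblock(b)];
}

void set_dirty(struct mini_cache *cache, dm_cblock_t cblock)
{
	if (!cache->dirty_bitset[from_cblock(cblock)]) {
		cache->dirty_bitset[from_cblock(cblock)] = 1;
		cache->nr_dirty++;
	}
}

/* As in the remap_to_cache()/copy() hits: a cache block maps to a sector
 * range starting at from_cblock(cblock) * sectors_per_block. */
sector_t cblock_to_sector(struct mini_cache *cache, dm_cblock_t cblock)
{
	return from_cblock(cblock) * cache->sectors_per_block;
}

int main(void)
{
	struct mini_cache cache = {
		.cache_size = to_cblock(CACHE_BLOCKS),
		.sectors_per_block = 128,           /* 64 KiB blocks with 512-byte sectors */
	};

	set_dirty(&cache, to_cblock(5));
	printf("cblock 5 dirty: %d, starts at sector %llu\n",
	       is_dirty(&cache, to_cblock(5)),
	       (unsigned long long) cblock_to_sector(&cache, to_cblock(5)));
	return 0;
}
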
D | dm-cache-block-types.h |
       39  static inline uint32_t from_cblock(dm_cblock_t b)  in from_cblock() function
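
The single hit in dm-cache-block-types.h is the definition itself: from_cblock() is the inverse of to_cblock(), a pair of inline converters around a sparse-annotated integer typedef, so cache block numbers cannot silently be mixed with raw integers or with origin block numbers (dm_oblock_t). The following stand-alone rendering of that pattern drops the __bitwise/__force sparse annotations so it compiles outside the kernel tree:

#include <stdint.h>
#include <stdio.h>

/*
 * In the kernel these typedefs carry __bitwise, so sparse treats dm_cblock_t
 * as a distinct type and arithmetic has to go through the converters below
 * (which use __force casts).  Plain typedefs are used here so this file
 * builds without the kernel headers.
 */
typedef uint64_t dm_block_t;
typedef dm_block_t dm_oblock_t;       /* origin-device block number */
typedef uint32_t dm_cblock_t;         /* cache-device block number  */

static inline dm_cblock_t to_cblock(uint32_t b)
{
	return (dm_cblock_t) b;
}

static inline uint32_t from_cblock(dm_cblock_t b)
{
	return (uint32_t) b;
}

int main(void)
{
	dm_cblock_t b = to_cblock(41);

	/* The unwrap/compute/re-wrap idiom seen throughout the hits above. */
	b = to_cblock(from_cblock(b) + 1);
	printf("cblock %u\n", from_cblock(b));
	return 0;
}
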