Lines matching refs: pool
Identifier cross-reference for "pool" in the device-mapper thin-provisioning target (drivers/md/dm-thin.c). Each entry gives the source line number, the matching code fragment, the enclosing function, and the symbol kind (struct, member, argument, local).

229 struct pool { struct
287 static void metadata_operation_failed(struct pool *pool, const char *op, int r); argument
289 static enum pool_mode get_pool_mode(struct pool *pool) in get_pool_mode() argument
291 return pool->pf.mode; in get_pool_mode()
294 static void notify_of_pool_mode_change(struct pool *pool) in notify_of_pool_mode_change() argument
304 enum pool_mode mode = get_pool_mode(pool); in notify_of_pool_mode_change()
307 if (!pool->pf.error_if_no_space) in notify_of_pool_mode_change()
313 dm_table_event(pool->ti->table); in notify_of_pool_mode_change()
315 dm_device_name(pool->pool_md), in notify_of_pool_mode_change()
324 struct pool *pool; member
343 struct pool *pool; member
364 static bool block_size_is_power_of_two(struct pool *pool) in block_size_is_power_of_two() argument
366 return pool->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
369 static sector_t block_to_sectors(struct pool *pool, dm_block_t b) in block_to_sectors() argument
371 return block_size_is_power_of_two(pool) ? in block_to_sectors()
372 (b << pool->sectors_per_block_shift) : in block_to_sectors()
373 (b * pool->sectors_per_block); in block_to_sectors()
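
The two helpers above form a complete unit: pools with a power-of-two block size cache the shift in sectors_per_block_shift (kept at -1 otherwise, per the pool_create() fragments at lines 2933-2937), so block-to-sector conversion is a shift in the common case instead of a multiply. A minimal reconstruction from the fragments, signatures taken verbatim:

    static bool block_size_is_power_of_two(struct pool *pool)
    {
            /* sectors_per_block_shift is -1 for non-power-of-two block sizes */
            return pool->sectors_per_block_shift >= 0;
    }

    static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
    {
            return block_size_is_power_of_two(pool) ?
                    (b << pool->sectors_per_block_shift) :
                    (b * pool->sectors_per_block);
    }
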
398 sector_t s = block_to_sectors(tc->pool, data_b); in issue_discard()
399 sector_t len = block_to_sectors(tc->pool, data_e - data_b); in issue_discard()
434 static void wake_worker(struct pool *pool) in wake_worker() argument
436 queue_work(pool->wq, &pool->worker); in wake_worker()
441 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, in bio_detain() argument
451 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO); in bio_detain()
453 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
459 dm_bio_prison_free_cell(pool->prison, cell_prealloc); in bio_detain()
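
The bio_detain() fragments at lines 441-459 sketch the usual bio-prison pattern: pre-allocate a cell (GFP_NOIO may block but will not fail), try to detain the bio under the key, and free the pre-allocated cell if an existing cell was reused. A reconstruction under that reading; the local declarations and the early-return shape are assumptions beyond the listed fragments:

    static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
                          struct dm_bio_prison_cell **cell_result)
    {
            int r;
            struct dm_bio_prison_cell *cell_prealloc;

            /* Allocate a cell up front; GFP_NOIO may block but will not fail. */
            cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

            r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
            if (r)
                    /* An existing cell was reused; drop the unneeded preallocation. */
                    dm_bio_prison_free_cell(pool->prison, cell_prealloc);

            return r;
    }
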
464 static void cell_release(struct pool *pool, in cell_release() argument
468 dm_cell_release(pool->prison, cell, bios); in cell_release()
469 dm_bio_prison_free_cell(pool->prison, cell); in cell_release()
472 static void cell_visit_release(struct pool *pool, in cell_visit_release() argument
477 dm_cell_visit_release(pool->prison, fn, context, cell); in cell_visit_release()
478 dm_bio_prison_free_cell(pool->prison, cell); in cell_visit_release()
481 static void cell_release_no_holder(struct pool *pool, in cell_release_no_holder() argument
485 dm_cell_release_no_holder(pool->prison, cell, bios); in cell_release_no_holder()
486 dm_bio_prison_free_cell(pool->prison, cell); in cell_release_no_holder()
489 static void cell_error_with_code(struct pool *pool, in cell_error_with_code() argument
492 dm_cell_error(pool->prison, cell, error_code); in cell_error_with_code()
493 dm_bio_prison_free_cell(pool->prison, cell); in cell_error_with_code()
496 static blk_status_t get_pool_io_error_code(struct pool *pool) in get_pool_io_error_code() argument
498 return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR; in get_pool_io_error_code()
501 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_error() argument
503 cell_error_with_code(pool, cell, get_pool_io_error_code(pool)); in cell_error()
506 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_success() argument
508 cell_error_with_code(pool, cell, 0); in cell_success()
511 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_requeue() argument
513 cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE); in cell_requeue()
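
Lines 489-513 show that cell_error(), cell_success() and cell_requeue() are all thin wrappers over cell_error_with_code(), differing only in the blk_status_t they pass, with get_pool_io_error_code() choosing BLK_STS_NOSPC over BLK_STS_IOERR when the pool has run out of data space. Assembled from the fragments (the parameter list of cell_error_with_code() is inferred from its callers):

    static void cell_error_with_code(struct pool *pool,
                    struct dm_bio_prison_cell *cell, blk_status_t error_code)
    {
            dm_cell_error(pool->prison, cell, error_code);
            dm_bio_prison_free_cell(pool->prison, cell);
    }

    static blk_status_t get_pool_io_error_code(struct pool *pool)
    {
            return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
    }

    static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
    {
            cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
    }

    static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
    {
            cell_error_with_code(pool, cell, 0);    /* 0 == BLK_STS_OK: completes the bios */
    }

    static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
    {
            cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
    }
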
537 static void __pool_table_insert(struct pool *pool) in __pool_table_insert() argument
540 list_add(&pool->list, &dm_thin_pool_table.pools); in __pool_table_insert()
543 static void __pool_table_remove(struct pool *pool) in __pool_table_remove() argument
546 list_del(&pool->list); in __pool_table_remove()
549 static struct pool *__pool_table_lookup(struct mapped_device *md) in __pool_table_lookup()
551 struct pool *pool = NULL, *tmp; in __pool_table_lookup() local
557 pool = tmp; in __pool_table_lookup()
562 return pool; in __pool_table_lookup()
565 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev) in __pool_table_lookup_metadata_dev()
567 struct pool *pool = NULL, *tmp; in __pool_table_lookup_metadata_dev() local
573 pool = tmp; in __pool_table_lookup_metadata_dev()
578 return pool; in __pool_table_lookup_metadata_dev()
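
Both lookup helpers at lines 549-578 follow the same pattern: walk dm_thin_pool_table.pools (the list maintained by the insert/remove helpers above) and remember the first entry whose key field matches. A sketch of the first, assuming a plain list_for_each_entry() walk, a match on pool_md, and the lock assertion; the metadata-device variant differs only in the field compared:

    static struct pool *__pool_table_lookup(struct mapped_device *md)
    {
            struct pool *pool = NULL, *tmp;

            BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));    /* assumed assertion */

            list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
                    if (tmp->pool_md == md) {
                            pool = tmp;
                            break;
                    }
            }

            return pool;
    }
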
624 struct pool *pool = tc->pool; in requeue_deferred_cells() local
635 cell_requeue(pool, cell); in requeue_deferred_cells()
653 static void error_retry_list_with_code(struct pool *pool, blk_status_t error) in error_retry_list_with_code() argument
658 list_for_each_entry_rcu(tc, &pool->active_thins, list) in error_retry_list_with_code()
663 static void error_retry_list(struct pool *pool) in error_retry_list() argument
665 error_retry_list_with_code(pool, get_pool_io_error_code(pool)); in error_retry_list()
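
error_retry_list() (lines 653-665) fans a single error code out to every active thin device: it iterates pool->active_thins under RCU and errors each device's retry list. A sketch under that reading; the rcu_read_lock()/unlock() bracketing and the per-thin helper and list names (error_thin_bio_list() and retry_on_resume_list in mainline) are assumptions not shown in the fragments:

    static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
    {
            struct thin_c *tc;

            rcu_read_lock();
            list_for_each_entry_rcu(tc, &pool->active_thins, list)
                    error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
            rcu_read_unlock();
    }

    static void error_retry_list(struct pool *pool)
    {
            error_retry_list_with_code(pool, get_pool_io_error_code(pool));
    }
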
677 struct pool *pool = tc->pool; in get_bio_block() local
680 if (block_size_is_power_of_two(pool)) in get_bio_block()
681 block_nr >>= pool->sectors_per_block_shift; in get_bio_block()
683 (void) sector_div(block_nr, pool->sectors_per_block); in get_bio_block()
694 struct pool *pool = tc->pool; in get_bio_block_range() local
698 b += pool->sectors_per_block - 1ull; /* so we round up */ in get_bio_block_range()
700 if (block_size_is_power_of_two(pool)) { in get_bio_block_range()
701 b >>= pool->sectors_per_block_shift; in get_bio_block_range()
702 e >>= pool->sectors_per_block_shift; in get_bio_block_range()
704 (void) sector_div(b, pool->sectors_per_block); in get_bio_block_range()
705 (void) sector_div(e, pool->sectors_per_block); in get_bio_block_range()
718 struct pool *pool = tc->pool; in remap() local
722 if (block_size_is_power_of_two(pool)) in remap()
724 (block << pool->sectors_per_block_shift) | in remap()
725 (bi_sector & (pool->sectors_per_block - 1)); in remap()
727 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
728 sector_div(bi_sector, pool->sectors_per_block); in remap()
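
get_bio_block() and remap() (lines 677-728) are the two halves of the virtual-to-physical translation: the first converts a bio's starting sector into a thin block number, the second rewrites bi_sector so the bio targets the mapped data block while preserving the offset within the block. Both branch on the power-of-two test so the common case is a shift/mask rather than a division. Reconstructed from the fragments; the bio_set_dev() call redirecting the bio to the data device is an assumption from context:

    static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
    {
            struct pool *pool = tc->pool;
            sector_t block_nr = bio->bi_iter.bi_sector;

            if (block_size_is_power_of_two(pool))
                    block_nr >>= pool->sectors_per_block_shift;
            else
                    (void) sector_div(block_nr, pool->sectors_per_block);

            return block_nr;
    }

    static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
    {
            struct pool *pool = tc->pool;
            sector_t bi_sector = bio->bi_iter.bi_sector;

            bio_set_dev(bio, tc->pool_dev->bdev);   /* assumed: redirect to the data device */
            if (block_size_is_power_of_two(pool))
                    bio->bi_iter.bi_sector =
                            (block << pool->sectors_per_block_shift) |
                            (bi_sector & (pool->sectors_per_block - 1));
            else
                    /* sector_div() divides in place and returns the remainder */
                    bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
                            sector_div(bi_sector, pool->sectors_per_block);
    }
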
742 static void inc_all_io_entry(struct pool *pool, struct bio *bio) in inc_all_io_entry() argument
750 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); in inc_all_io_entry()
755 struct pool *pool = tc->pool; in issue() local
776 spin_lock_irq(&pool->lock); in issue()
777 bio_list_add(&pool->deferred_flush_bios, bio); in issue()
778 spin_unlock_irq(&pool->lock); in issue()
830 struct pool *pool = m->tc->pool; in __complete_mapping_preparation() local
833 list_add_tail(&m->list, &pool->prepared_mappings); in __complete_mapping_preparation()
834 wake_worker(pool); in __complete_mapping_preparation()
841 struct pool *pool = m->tc->pool; in complete_mapping_preparation() local
843 spin_lock_irqsave(&pool->lock, flags); in complete_mapping_preparation()
845 spin_unlock_irqrestore(&pool->lock, flags); in complete_mapping_preparation()
883 struct pool *pool = tc->pool; in cell_defer_no_holder() local
888 cell_release_no_holder(pool, cell, &tc->deferred_bio_list); in cell_defer_no_holder()
893 wake_worker(pool); in cell_defer_no_holder()
914 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
942 cell_visit_release(tc->pool, __inc_remap_and_issue_cell, in inc_remap_and_issue_cell()
954 cell_error(m->tc->pool, m->cell); in process_prepared_mapping_fail()
956 mempool_free(m, &m->tc->pool->mapping_pool); in process_prepared_mapping_fail()
961 struct pool *pool = tc->pool; in complete_overwrite_bio() local
986 spin_lock_irq(&pool->lock); in complete_overwrite_bio()
987 bio_list_add(&pool->deferred_flush_completions, bio); in complete_overwrite_bio()
988 spin_unlock_irq(&pool->lock); in complete_overwrite_bio()
994 struct pool *pool = tc->pool; in process_prepared_mapping() local
999 cell_error(pool, m->cell); in process_prepared_mapping()
1010 metadata_operation_failed(pool, "dm_thin_insert_block", r); in process_prepared_mapping()
1011 cell_error(pool, m->cell); in process_prepared_mapping()
1025 inc_all_io_entry(tc->pool, m->cell->holder); in process_prepared_mapping()
1032 mempool_free(m, &pool->mapping_pool); in process_prepared_mapping()
1042 mempool_free(m, &tc->pool->mapping_pool); in free_discard_mapping()
1064 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r); in process_prepared_discard_no_passdown()
1070 mempool_free(m, &tc->pool->mapping_pool); in process_prepared_discard_no_passdown()
1085 struct pool *pool = tc->pool; in passdown_double_checking_shared_status() local
1093 r = dm_pool_block_is_shared(pool->pmd, b, &shared); in passdown_double_checking_shared_status()
1106 r = dm_pool_block_is_shared(pool->pmd, e, &shared); in passdown_double_checking_shared_status()
1127 struct pool *pool = m->tc->pool; in queue_passdown_pt2() local
1129 spin_lock_irqsave(&pool->lock, flags); in queue_passdown_pt2()
1130 list_add_tail(&m->list, &pool->prepared_discards_pt2); in queue_passdown_pt2()
1131 spin_unlock_irqrestore(&pool->lock, flags); in queue_passdown_pt2()
1132 wake_worker(pool); in queue_passdown_pt2()
1149 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt1() local
1160 metadata_operation_failed(pool, "dm_thin_remove_range", r); in process_prepared_discard_passdown_pt1()
1163 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt1()
1171 r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); in process_prepared_discard_passdown_pt1()
1173 metadata_operation_failed(pool, "dm_pool_inc_data_range", r); in process_prepared_discard_passdown_pt1()
1176 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt1()
1198 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt2() local
1204 r = dm_pool_dec_data_range(pool->pmd, m->data_block, in process_prepared_discard_passdown_pt2()
1207 metadata_operation_failed(pool, "dm_pool_dec_data_range", r); in process_prepared_discard_passdown_pt2()
1213 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt2()
1216 static void process_prepared(struct pool *pool, struct list_head *head, in process_prepared() argument
1223 spin_lock_irq(&pool->lock); in process_prepared()
1225 spin_unlock_irq(&pool->lock); in process_prepared()
1234 static int io_overlaps_block(struct pool *pool, struct bio *bio) in io_overlaps_block() argument
1237 (pool->sectors_per_block << SECTOR_SHIFT); in io_overlaps_block()
1240 static int io_overwrites_block(struct pool *pool, struct bio *bio) in io_overwrites_block() argument
1243 io_overlaps_block(pool, bio); in io_overwrites_block()
1253 static int ensure_next_mapping(struct pool *pool) in ensure_next_mapping() argument
1255 if (pool->next_mapping) in ensure_next_mapping()
1258 pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC); in ensure_next_mapping()
1260 return pool->next_mapping ? 0 : -ENOMEM; in ensure_next_mapping()
1263 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) in get_next_mapping() argument
1265 struct dm_thin_new_mapping *m = pool->next_mapping; in get_next_mapping()
1267 BUG_ON(!pool->next_mapping); in get_next_mapping()
1273 pool->next_mapping = NULL; in get_next_mapping()
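
ensure_next_mapping()/get_next_mapping() (lines 1253-1273) implement a one-slot preallocation scheme: the worker reserves the next dm_thin_new_mapping with GFP_ATOMIC before committing to an operation, and get_next_mapping() then consumes the slot, which must already be populated (hence the BUG_ON). A sketch; the zeroing and list-head initialisation in the middle of get_next_mapping() are assumptions covering the elided lines:

    static int ensure_next_mapping(struct pool *pool)
    {
            if (pool->next_mapping)
                    return 0;

            pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC);

            return pool->next_mapping ? 0 : -ENOMEM;
    }

    static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
    {
            struct dm_thin_new_mapping *m = pool->next_mapping;

            BUG_ON(!pool->next_mapping);

            memset(m, 0, sizeof(struct dm_thin_new_mapping));       /* assumed */
            INIT_LIST_HEAD(&m->list);                               /* assumed */

            pool->next_mapping = NULL;

            return m;
    }
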
1287 dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); in ll_zero()
1294 struct pool *pool = tc->pool; in remap_and_issue_overwrite() local
1300 inc_all_io_entry(pool, bio); in remap_and_issue_overwrite()
1313 struct pool *pool = tc->pool; in schedule_copy() local
1314 struct dm_thin_new_mapping *m = get_next_mapping(pool); in schedule_copy()
1329 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) in schedule_copy()
1338 if (io_overwrites_block(pool, bio)) in schedule_copy()
1344 from.sector = data_origin * pool->sectors_per_block; in schedule_copy()
1348 to.sector = data_dest * pool->sectors_per_block; in schedule_copy()
1351 dm_kcopyd_copy(pool->copier, &from, 1, &to, in schedule_copy()
1357 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { in schedule_copy()
1360 data_dest * pool->sectors_per_block + len, in schedule_copy()
1361 (data_dest + 1) * pool->sectors_per_block); in schedule_copy()
1374 tc->pool->sectors_per_block); in schedule_internal_copy()
1381 struct pool *pool = tc->pool; in schedule_zero() local
1382 struct dm_thin_new_mapping *m = get_next_mapping(pool); in schedule_zero()
1396 if (pool->pf.zero_new_blocks) { in schedule_zero()
1397 if (io_overwrites_block(pool, bio)) in schedule_zero()
1400 ll_zero(tc, m, data_block * pool->sectors_per_block, in schedule_zero()
1401 (data_block + 1) * pool->sectors_per_block); in schedule_zero()
1410 struct pool *pool = tc->pool; in schedule_external_copy() local
1411 sector_t virt_block_begin = virt_block * pool->sectors_per_block; in schedule_external_copy()
1412 sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; in schedule_external_copy()
1417 pool->sectors_per_block); in schedule_external_copy()
1428 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1430 static void requeue_bios(struct pool *pool);
1437 static bool is_read_only(struct pool *pool) in is_read_only() argument
1439 return is_read_only_pool_mode(get_pool_mode(pool)); in is_read_only()
1442 static void check_for_metadata_space(struct pool *pool) in check_for_metadata_space() argument
1448 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free); in check_for_metadata_space()
1454 if (ooms_reason && !is_read_only(pool)) { in check_for_metadata_space()
1456 set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE); in check_for_metadata_space()
1460 static void check_for_data_space(struct pool *pool) in check_for_data_space() argument
1465 if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE) in check_for_data_space()
1468 r = dm_pool_get_free_block_count(pool->pmd, &nr_free); in check_for_data_space()
1473 set_pool_mode(pool, PM_WRITE); in check_for_data_space()
1474 requeue_bios(pool); in check_for_data_space()
1482 static int commit(struct pool *pool) in commit() argument
1486 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) in commit()
1489 r = dm_pool_commit_metadata(pool->pmd); in commit()
1491 metadata_operation_failed(pool, "dm_pool_commit_metadata", r); in commit()
1493 check_for_metadata_space(pool); in commit()
1494 check_for_data_space(pool); in commit()
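
commit() (lines 1482-1494) is the single funnel for metadata commits: it refuses to touch metadata once the pool mode is PM_OUT_OF_METADATA_SPACE or worse, degrades the pool via metadata_operation_failed() on error, and re-checks free metadata/data space only after a successful commit. A reconstruction; the -EINVAL early-return value is an assumption:

    static int commit(struct pool *pool)
    {
            int r;

            if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
                    return -EINVAL; /* assumed error value */

            r = dm_pool_commit_metadata(pool->pmd);
            if (r)
                    metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
            else {
                    check_for_metadata_space(pool);
                    check_for_data_space(pool);
            }

            return r;
    }
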
1500 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) in check_low_water_mark() argument
1502 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { in check_low_water_mark()
1504 dm_device_name(pool->pool_md)); in check_low_water_mark()
1505 spin_lock_irq(&pool->lock); in check_low_water_mark()
1506 pool->low_water_triggered = true; in check_low_water_mark()
1507 spin_unlock_irq(&pool->lock); in check_low_water_mark()
1508 dm_table_event(pool->ti->table); in check_low_water_mark()
1516 struct pool *pool = tc->pool; in alloc_data_block() local
1518 if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) in alloc_data_block()
1521 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1523 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); in alloc_data_block()
1527 check_low_water_mark(pool, free_blocks); in alloc_data_block()
1534 r = commit(pool); in alloc_data_block()
1538 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1540 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); in alloc_data_block()
1545 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); in alloc_data_block()
1550 r = dm_pool_alloc_data_block(pool->pmd, result); in alloc_data_block()
1553 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); in alloc_data_block()
1555 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); in alloc_data_block()
1559 r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1561 metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r); in alloc_data_block()
1567 r = commit(pool); in alloc_data_block()
1589 static blk_status_t should_error_unserviceable_bio(struct pool *pool) in should_error_unserviceable_bio() argument
1591 enum pool_mode m = get_pool_mode(pool); in should_error_unserviceable_bio()
1600 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; in should_error_unserviceable_bio()
1613 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) in handle_unserviceable_bio() argument
1615 blk_status_t error = should_error_unserviceable_bio(pool); in handle_unserviceable_bio()
1624 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell) in retry_bios_on_resume() argument
1630 error = should_error_unserviceable_bio(pool); in retry_bios_on_resume()
1632 cell_error_with_code(pool, cell, error); in retry_bios_on_resume()
1637 cell_release(pool, cell, &bios); in retry_bios_on_resume()
1646 struct pool *pool = tc->pool; in process_discard_cell_no_passdown() local
1647 struct dm_thin_new_mapping *m = get_next_mapping(pool); in process_discard_cell_no_passdown()
1659 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) in process_discard_cell_no_passdown()
1660 pool->process_prepared_discard(m); in process_discard_cell_no_passdown()
1666 struct pool *pool = tc->pool; in break_up_discard_bio() local
1676 r = ensure_next_mapping(pool); in break_up_discard_bio()
1691 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) { in break_up_discard_bio()
1701 m = get_next_mapping(pool); in break_up_discard_bio()
1719 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) in break_up_discard_bio()
1720 pool->process_prepared_discard(m); in break_up_discard_bio()
1763 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) in process_discard_bio()
1773 tc->pool->process_discard_cell(tc, virt_cell); in process_discard_bio()
1783 struct pool *pool = tc->pool; in break_sharing() local
1793 retry_bios_on_resume(pool, cell); in break_sharing()
1799 cell_error(pool, cell); in break_sharing()
1817 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); in __remap_and_issue_shared_cell()
1818 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1835 cell_visit_release(tc->pool, __remap_and_issue_shared_cell, in remap_and_issue_shared_cell()
1851 struct pool *pool = tc->pool; in process_shared_bio() local
1859 if (bio_detain(pool, &key, bio, &data_cell)) { in process_shared_bio()
1870 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); in process_shared_bio()
1871 inc_all_io_entry(pool, bio); in process_shared_bio()
1884 struct pool *pool = tc->pool; in provision_block() local
1890 inc_all_io_entry(pool, bio); in provision_block()
1917 retry_bios_on_resume(pool, cell); in provision_block()
1923 cell_error(pool, cell); in provision_block()
1931 struct pool *pool = tc->pool; in process_cell() local
1937 cell_requeue(pool, cell); in process_cell()
1947 inc_all_io_entry(pool, bio); in process_cell()
1955 inc_all_io_entry(pool, bio); in process_cell()
1985 struct pool *pool = tc->pool; in process_bio() local
1995 if (bio_detain(pool, &key, bio, &cell)) in process_bio()
2013 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2017 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2028 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2033 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2074 cell_success(tc->pool, cell); in process_cell_success()
2079 cell_error(tc->pool, cell); in process_cell_fail()
2086 static int need_commit_due_to_time(struct pool *pool) in need_commit_due_to_time() argument
2088 return !time_in_range(jiffies, pool->last_commit_jiffies, in need_commit_due_to_time()
2089 pool->last_commit_jiffies + COMMIT_PERIOD); in need_commit_due_to_time()
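
need_commit_due_to_time() (lines 2086-2089) bounds how stale uncommitted metadata can get: once jiffies falls outside [last_commit_jiffies, last_commit_jiffies + COMMIT_PERIOD], process_deferred_bios() (see line 2363) forces a commit even when no flush requires one. The fragments give the complete body:

    static int need_commit_due_to_time(struct pool *pool)
    {
            return !time_in_range(jiffies, pool->last_commit_jiffies,
                                  pool->last_commit_jiffies + COMMIT_PERIOD);
    }
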
2158 struct pool *pool = tc->pool; in process_thin_deferred_bios() local
2193 if (ensure_next_mapping(pool)) { in process_thin_deferred_bios()
2202 pool->process_discard(tc, bio); in process_thin_deferred_bios()
2204 pool->process_bio(tc, bio); in process_thin_deferred_bios()
2207 throttle_work_update(&pool->throttle); in process_thin_deferred_bios()
2208 dm_pool_issue_prefetches(pool->pmd); in process_thin_deferred_bios()
2231 static unsigned sort_cells(struct pool *pool, struct list_head *cells) in sort_cells() argument
2240 pool->cell_sort_array[count++] = cell; in sort_cells()
2244 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL); in sort_cells()
2251 struct pool *pool = tc->pool; in process_thin_deferred_cells() local
2266 count = sort_cells(tc->pool, &cells); in process_thin_deferred_cells()
2269 cell = pool->cell_sort_array[i]; in process_thin_deferred_cells()
2277 if (ensure_next_mapping(pool)) { in process_thin_deferred_cells()
2279 list_add(&pool->cell_sort_array[j]->user_list, &cells); in process_thin_deferred_cells()
2288 pool->process_discard_cell(tc, cell); in process_thin_deferred_cells()
2290 pool->process_cell(tc, cell); in process_thin_deferred_cells()
2303 static struct thin_c *get_first_thin(struct pool *pool) in get_first_thin() argument
2308 if (!list_empty(&pool->active_thins)) { in get_first_thin()
2309 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); in get_first_thin()
2317 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) in get_next_thin() argument
2322 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { in get_next_thin()
2334 static void process_deferred_bios(struct pool *pool) in process_deferred_bios() argument
2340 tc = get_first_thin(pool); in process_deferred_bios()
2344 tc = get_next_thin(pool, tc); in process_deferred_bios()
2354 spin_lock_irq(&pool->lock); in process_deferred_bios()
2355 bio_list_merge(&bios, &pool->deferred_flush_bios); in process_deferred_bios()
2356 bio_list_init(&pool->deferred_flush_bios); in process_deferred_bios()
2358 bio_list_merge(&bio_completions, &pool->deferred_flush_completions); in process_deferred_bios()
2359 bio_list_init(&pool->deferred_flush_completions); in process_deferred_bios()
2360 spin_unlock_irq(&pool->lock); in process_deferred_bios()
2363 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) in process_deferred_bios()
2366 if (commit(pool)) { in process_deferred_bios()
2373 pool->last_commit_jiffies = jiffies; in process_deferred_bios()
2392 struct pool *pool = container_of(ws, struct pool, worker); in do_worker() local
2394 throttle_work_start(&pool->throttle); in do_worker()
2395 dm_pool_issue_prefetches(pool->pmd); in do_worker()
2396 throttle_work_update(&pool->throttle); in do_worker()
2397 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); in do_worker()
2398 throttle_work_update(&pool->throttle); in do_worker()
2399 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); in do_worker()
2400 throttle_work_update(&pool->throttle); in do_worker()
2401 process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2); in do_worker()
2402 throttle_work_update(&pool->throttle); in do_worker()
2403 process_deferred_bios(pool); in do_worker()
2404 throttle_work_complete(&pool->throttle); in do_worker()
2413 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker); in do_waker() local
2414 wake_worker(pool); in do_waker()
2415 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); in do_waker()
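
do_worker() and do_waker() (lines 2392-2415) show the pool's event loop: a single ordered workqueue item drains each prepared-work list and then the deferred bios, with throttle checkpoints between stages, while the waker re-queues itself every COMMIT_PERIOD so the worker (and therefore the periodic commit above) keeps running even when no I/O arrives. Assembled from the fragments, with only the function braces assumed:

    static void do_worker(struct work_struct *ws)
    {
            struct pool *pool = container_of(ws, struct pool, worker);

            throttle_work_start(&pool->throttle);
            dm_pool_issue_prefetches(pool->pmd);
            throttle_work_update(&pool->throttle);
            process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
            throttle_work_update(&pool->throttle);
            process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
            throttle_work_update(&pool->throttle);
            process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2);
            throttle_work_update(&pool->throttle);
            process_deferred_bios(pool);
            throttle_work_complete(&pool->throttle);
    }

    /*
     * Kick the worker, then rearm: keeps COMMIT_PERIOD commits flowing
     * even on an otherwise idle pool.
     */
    static void do_waker(struct work_struct *ws)
    {
            struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);

            wake_worker(pool);
            queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
    }
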
2425 struct pool *pool = container_of(to_delayed_work(ws), struct pool, in do_no_space_timeout() local
2428 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { in do_no_space_timeout()
2429 pool->pf.error_if_no_space = true; in do_no_space_timeout()
2430 notify_of_pool_mode_change(pool); in do_no_space_timeout()
2431 error_retry_list_with_code(pool, BLK_STS_NOSPC); in do_no_space_timeout()
2452 static void pool_work_wait(struct pool_work *pw, struct pool *pool, in pool_work_wait() argument
2457 queue_work(pool->wq, &pw->worker); in pool_work_wait()
2493 pool_work_wait(&w.pw, tc->pool, fn); in noflush_work()
2503 static void set_discard_callbacks(struct pool *pool) in set_discard_callbacks() argument
2505 struct pool_c *pt = pool->ti->private; in set_discard_callbacks()
2508 pool->process_discard_cell = process_discard_cell_passdown; in set_discard_callbacks()
2509 pool->process_prepared_discard = process_prepared_discard_passdown_pt1; in set_discard_callbacks()
2510 pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2; in set_discard_callbacks()
2512 pool->process_discard_cell = process_discard_cell_no_passdown; in set_discard_callbacks()
2513 pool->process_prepared_discard = process_prepared_discard_no_passdown; in set_discard_callbacks()
2517 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) in set_pool_mode() argument
2519 struct pool_c *pt = pool->ti->private; in set_pool_mode()
2520 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); in set_pool_mode()
2521 enum pool_mode old_mode = get_pool_mode(pool); in set_pool_mode()
2530 dm_device_name(pool->pool_md)); in set_pool_mode()
2546 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2547 pool->process_bio = process_bio_fail; in set_pool_mode()
2548 pool->process_discard = process_bio_fail; in set_pool_mode()
2549 pool->process_cell = process_cell_fail; in set_pool_mode()
2550 pool->process_discard_cell = process_cell_fail; in set_pool_mode()
2551 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2552 pool->process_prepared_discard = process_prepared_discard_fail; in set_pool_mode()
2554 error_retry_list(pool); in set_pool_mode()
2559 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2560 pool->process_bio = process_bio_read_only; in set_pool_mode()
2561 pool->process_discard = process_bio_success; in set_pool_mode()
2562 pool->process_cell = process_cell_read_only; in set_pool_mode()
2563 pool->process_discard_cell = process_cell_success; in set_pool_mode()
2564 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2565 pool->process_prepared_discard = process_prepared_discard_success; in set_pool_mode()
2567 error_retry_list(pool); in set_pool_mode()
2579 pool->out_of_data_space = true; in set_pool_mode()
2580 pool->process_bio = process_bio_read_only; in set_pool_mode()
2581 pool->process_discard = process_discard_bio; in set_pool_mode()
2582 pool->process_cell = process_cell_read_only; in set_pool_mode()
2583 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2584 set_discard_callbacks(pool); in set_pool_mode()
2586 if (!pool->pf.error_if_no_space && no_space_timeout) in set_pool_mode()
2587 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); in set_pool_mode()
2592 cancel_delayed_work_sync(&pool->no_space_timeout); in set_pool_mode()
2593 pool->out_of_data_space = false; in set_pool_mode()
2594 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; in set_pool_mode()
2595 dm_pool_metadata_read_write(pool->pmd); in set_pool_mode()
2596 pool->process_bio = process_bio; in set_pool_mode()
2597 pool->process_discard = process_discard_bio; in set_pool_mode()
2598 pool->process_cell = process_cell; in set_pool_mode()
2599 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2600 set_discard_callbacks(pool); in set_pool_mode()
2604 pool->pf.mode = new_mode; in set_pool_mode()
2612 notify_of_pool_mode_change(pool); in set_pool_mode()
2615 static void abort_transaction(struct pool *pool) in abort_transaction() argument
2617 const char *dev_name = dm_device_name(pool->pool_md); in abort_transaction()
2620 if (dm_pool_abort_metadata(pool->pmd)) { in abort_transaction()
2622 set_pool_mode(pool, PM_FAIL); in abort_transaction()
2625 if (dm_pool_metadata_set_needs_check(pool->pmd)) { in abort_transaction()
2627 set_pool_mode(pool, PM_FAIL); in abort_transaction()
2631 static void metadata_operation_failed(struct pool *pool, const char *op, int r) in metadata_operation_failed() argument
2634 dm_device_name(pool->pool_md), op, r); in metadata_operation_failed()
2636 abort_transaction(pool); in metadata_operation_failed()
2637 set_pool_mode(pool, PM_READ_ONLY); in metadata_operation_failed()
2651 struct pool *pool = tc->pool; in thin_defer_bio() local
2657 wake_worker(pool); in thin_defer_bio()
2662 struct pool *pool = tc->pool; in thin_defer_bio_with_throttle() local
2664 throttle_lock(&pool->throttle); in thin_defer_bio_with_throttle()
2666 throttle_unlock(&pool->throttle); in thin_defer_bio_with_throttle()
2671 struct pool *pool = tc->pool; in thin_defer_cell() local
2673 throttle_lock(&pool->throttle); in thin_defer_cell()
2677 throttle_unlock(&pool->throttle); in thin_defer_cell()
2679 wake_worker(pool); in thin_defer_cell()
2714 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_bio_map()
2729 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2759 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2764 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2788 static void requeue_bios(struct pool *pool) in requeue_bios() argument
2793 list_for_each_entry_rcu(tc, &pool->active_thins, list) { in requeue_bios()
2816 struct pool *pool = pt->pool; in disable_passdown_if_not_supported() local
2827 else if (data_limits->max_discard_sectors < pool->sectors_per_block) in disable_passdown_if_not_supported()
2836 static int bind_control_target(struct pool *pool, struct dm_target *ti) in bind_control_target() argument
2843 enum pool_mode old_mode = get_pool_mode(pool); in bind_control_target()
2853 pool->ti = ti; in bind_control_target()
2854 pool->pf = pt->adjusted_pf; in bind_control_target()
2855 pool->low_water_blocks = pt->low_water_blocks; in bind_control_target()
2857 set_pool_mode(pool, new_mode); in bind_control_target()
2862 static void unbind_control_target(struct pool *pool, struct dm_target *ti) in unbind_control_target() argument
2864 if (pool->ti == ti) in unbind_control_target()
2865 pool->ti = NULL; in unbind_control_target()
2881 static void __pool_destroy(struct pool *pool) in __pool_destroy() argument
2883 __pool_table_remove(pool); in __pool_destroy()
2885 vfree(pool->cell_sort_array); in __pool_destroy()
2886 if (dm_pool_metadata_close(pool->pmd) < 0) in __pool_destroy()
2889 dm_bio_prison_destroy(pool->prison); in __pool_destroy()
2890 dm_kcopyd_client_destroy(pool->copier); in __pool_destroy()
2892 cancel_delayed_work_sync(&pool->waker); in __pool_destroy()
2893 cancel_delayed_work_sync(&pool->no_space_timeout); in __pool_destroy()
2894 if (pool->wq) in __pool_destroy()
2895 destroy_workqueue(pool->wq); in __pool_destroy()
2897 if (pool->next_mapping) in __pool_destroy()
2898 mempool_free(pool->next_mapping, &pool->mapping_pool); in __pool_destroy()
2899 mempool_exit(&pool->mapping_pool); in __pool_destroy()
2900 dm_deferred_set_destroy(pool->shared_read_ds); in __pool_destroy()
2901 dm_deferred_set_destroy(pool->all_io_ds); in __pool_destroy()
2902 kfree(pool); in __pool_destroy()
2907 static struct pool *pool_create(struct mapped_device *pool_md, in pool_create()
2915 struct pool *pool; in pool_create() local
2922 return (struct pool *)pmd; in pool_create()
2925 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in pool_create()
2926 if (!pool) { in pool_create()
2932 pool->pmd = pmd; in pool_create()
2933 pool->sectors_per_block = block_size; in pool_create()
2935 pool->sectors_per_block_shift = -1; in pool_create()
2937 pool->sectors_per_block_shift = __ffs(block_size); in pool_create()
2938 pool->low_water_blocks = 0; in pool_create()
2939 pool_features_init(&pool->pf); in pool_create()
2940 pool->prison = dm_bio_prison_create(); in pool_create()
2941 if (!pool->prison) { in pool_create()
2947 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); in pool_create()
2948 if (IS_ERR(pool->copier)) { in pool_create()
2949 r = PTR_ERR(pool->copier); in pool_create()
2959 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in pool_create()
2960 if (!pool->wq) { in pool_create()
2966 throttle_init(&pool->throttle); in pool_create()
2967 INIT_WORK(&pool->worker, do_worker); in pool_create()
2968 INIT_DELAYED_WORK(&pool->waker, do_waker); in pool_create()
2969 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); in pool_create()
2970 spin_lock_init(&pool->lock); in pool_create()
2971 bio_list_init(&pool->deferred_flush_bios); in pool_create()
2972 bio_list_init(&pool->deferred_flush_completions); in pool_create()
2973 INIT_LIST_HEAD(&pool->prepared_mappings); in pool_create()
2974 INIT_LIST_HEAD(&pool->prepared_discards); in pool_create()
2975 INIT_LIST_HEAD(&pool->prepared_discards_pt2); in pool_create()
2976 INIT_LIST_HEAD(&pool->active_thins); in pool_create()
2977 pool->low_water_triggered = false; in pool_create()
2978 pool->suspended = true; in pool_create()
2979 pool->out_of_data_space = false; in pool_create()
2981 pool->shared_read_ds = dm_deferred_set_create(); in pool_create()
2982 if (!pool->shared_read_ds) { in pool_create()
2988 pool->all_io_ds = dm_deferred_set_create(); in pool_create()
2989 if (!pool->all_io_ds) { in pool_create()
2995 pool->next_mapping = NULL; in pool_create()
2996 r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE, in pool_create()
3004 pool->cell_sort_array = in pool_create()
3006 sizeof(*pool->cell_sort_array))); in pool_create()
3007 if (!pool->cell_sort_array) { in pool_create()
3013 pool->ref_count = 1; in pool_create()
3014 pool->last_commit_jiffies = jiffies; in pool_create()
3015 pool->pool_md = pool_md; in pool_create()
3016 pool->md_dev = metadata_dev; in pool_create()
3017 pool->data_dev = data_dev; in pool_create()
3018 __pool_table_insert(pool); in pool_create()
3020 return pool; in pool_create()
3023 mempool_exit(&pool->mapping_pool); in pool_create()
3025 dm_deferred_set_destroy(pool->all_io_ds); in pool_create()
3027 dm_deferred_set_destroy(pool->shared_read_ds); in pool_create()
3029 destroy_workqueue(pool->wq); in pool_create()
3031 dm_kcopyd_client_destroy(pool->copier); in pool_create()
3033 dm_bio_prison_destroy(pool->prison); in pool_create()
3035 kfree(pool); in pool_create()
3043 static void __pool_inc(struct pool *pool) in __pool_inc() argument
3046 pool->ref_count++; in __pool_inc()
3049 static void __pool_dec(struct pool *pool) in __pool_dec() argument
3052 BUG_ON(!pool->ref_count); in __pool_dec()
3053 if (!--pool->ref_count) in __pool_dec()
3054 __pool_destroy(pool); in __pool_dec()
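
__pool_inc()/__pool_dec() (lines 3043-3054) implement plain reference counting on the pool, protected by the pool-table mutex rather than atomics (the double-underscore prefix marking lock-held helpers); dropping the last reference tears the pool down via __pool_destroy(). A sketch; the mutex assertions are assumptions consistent with the other __pool_table_* helpers:

    static void __pool_inc(struct pool *pool)
    {
            BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));    /* assumed assertion */
            pool->ref_count++;
    }

    static void __pool_dec(struct pool *pool)
    {
            BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));    /* assumed assertion */
            BUG_ON(!pool->ref_count);
            if (!--pool->ref_count)
                    __pool_destroy(pool);
    }
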
3057 static struct pool *__pool_find(struct mapped_device *pool_md, in __pool_find()
3063 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev); in __pool_find() local
3065 if (pool) { in __pool_find()
3066 if (pool->pool_md != pool_md) { in __pool_find()
3070 if (pool->data_dev != data_dev) { in __pool_find()
3074 __pool_inc(pool); in __pool_find()
3077 pool = __pool_table_lookup(pool_md); in __pool_find()
3078 if (pool) { in __pool_find()
3079 if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) { in __pool_find()
3083 __pool_inc(pool); in __pool_find()
3086 pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error); in __pool_find()
3091 return pool; in __pool_find()
3103 unbind_control_target(pt->pool, ti); in pool_dtr()
3104 __pool_dec(pt->pool); in pool_dtr()
3164 struct pool *pool = context; in metadata_low_callback() local
3167 dm_device_name(pool->pool_md)); in metadata_low_callback()
3169 dm_table_event(pool->ti->table); in metadata_low_callback()
3185 struct pool *pool = context; in metadata_pre_commit_callback() local
3187 return blkdev_issue_flush(pool->data_dev); in metadata_pre_commit_callback()
3257 struct pool *pool; in pool_ctr() local
3332 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev, in pool_ctr()
3334 if (IS_ERR(pool)) { in pool_ctr()
3335 r = PTR_ERR(pool); in pool_ctr()
3345 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) { in pool_ctr()
3351 pt->pool = pool; in pool_ctr()
3376 r = dm_pool_register_metadata_threshold(pt->pool->pmd, in pool_ctr()
3379 pool); in pool_ctr()
3385 dm_pool_register_pre_commit_callback(pool->pmd, in pool_ctr()
3386 metadata_pre_commit_callback, pool); in pool_ctr()
3393 __pool_dec(pool); in pool_ctr()
3410 struct pool *pool = pt->pool; in pool_map() local
3415 spin_lock_irq(&pool->lock); in pool_map()
3418 spin_unlock_irq(&pool->lock); in pool_map()
3427 struct pool *pool = pt->pool; in maybe_resize_data_dev() local
3433 (void) sector_div(data_size, pool->sectors_per_block); in maybe_resize_data_dev()
3435 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); in maybe_resize_data_dev()
3438 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3444 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3449 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_data_dev()
3451 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3457 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3459 r = dm_pool_resize_data_dev(pool->pmd, data_size); in maybe_resize_data_dev()
3461 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r); in maybe_resize_data_dev()
3475 struct pool *pool = pt->pool; in maybe_resize_metadata_dev() local
3480 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); in maybe_resize_metadata_dev()
3482 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); in maybe_resize_metadata_dev()
3485 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3491 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3496 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_metadata_dev()
3498 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3502 warn_if_metadata_device_too_big(pool->md_dev); in maybe_resize_metadata_dev()
3504 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3507 if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE) in maybe_resize_metadata_dev()
3508 set_pool_mode(pool, PM_WRITE); in maybe_resize_metadata_dev()
3510 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); in maybe_resize_metadata_dev()
3512 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); in maybe_resize_metadata_dev()
3538 struct pool *pool = pt->pool; in pool_preresume() local
3543 r = bind_control_target(pool, ti); in pool_preresume()
3556 (void) commit(pool); in pool_preresume()
3563 if (r && get_pool_mode(pool) == PM_FAIL) in pool_preresume()
3569 static void pool_suspend_active_thins(struct pool *pool) in pool_suspend_active_thins() argument
3574 tc = get_first_thin(pool); in pool_suspend_active_thins()
3577 tc = get_next_thin(pool, tc); in pool_suspend_active_thins()
3581 static void pool_resume_active_thins(struct pool *pool) in pool_resume_active_thins() argument
3586 tc = get_first_thin(pool); in pool_resume_active_thins()
3589 tc = get_next_thin(pool, tc); in pool_resume_active_thins()
3596 struct pool *pool = pt->pool; in pool_resume() local
3602 requeue_bios(pool); in pool_resume()
3603 pool_resume_active_thins(pool); in pool_resume()
3605 spin_lock_irq(&pool->lock); in pool_resume()
3606 pool->low_water_triggered = false; in pool_resume()
3607 pool->suspended = false; in pool_resume()
3608 spin_unlock_irq(&pool->lock); in pool_resume()
3610 do_waker(&pool->waker.work); in pool_resume()
3616 struct pool *pool = pt->pool; in pool_presuspend() local
3618 spin_lock_irq(&pool->lock); in pool_presuspend()
3619 pool->suspended = true; in pool_presuspend()
3620 spin_unlock_irq(&pool->lock); in pool_presuspend()
3622 pool_suspend_active_thins(pool); in pool_presuspend()
3628 struct pool *pool = pt->pool; in pool_presuspend_undo() local
3630 pool_resume_active_thins(pool); in pool_presuspend_undo()
3632 spin_lock_irq(&pool->lock); in pool_presuspend_undo()
3633 pool->suspended = false; in pool_presuspend_undo()
3634 spin_unlock_irq(&pool->lock); in pool_presuspend_undo()
3640 struct pool *pool = pt->pool; in pool_postsuspend() local
3642 cancel_delayed_work_sync(&pool->waker); in pool_postsuspend()
3643 cancel_delayed_work_sync(&pool->no_space_timeout); in pool_postsuspend()
3644 flush_workqueue(pool->wq); in pool_postsuspend()
3645 (void) commit(pool); in pool_postsuspend()
3671 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool) in process_create_thin_mesg() argument
3684 r = dm_pool_create_thin(pool->pmd, dev_id); in process_create_thin_mesg()
3694 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool) in process_create_snap_mesg() argument
3712 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id); in process_create_snap_mesg()
3722 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool) in process_delete_mesg() argument
3735 r = dm_pool_delete_thin_device(pool->pmd, dev_id); in process_delete_mesg()
3742 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool) in process_set_transaction_id_mesg() argument
3761 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id); in process_set_transaction_id_mesg()
3771 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) in process_reserve_metadata_snap_mesg() argument
3779 (void) commit(pool); in process_reserve_metadata_snap_mesg()
3781 r = dm_pool_reserve_metadata_snap(pool->pmd); in process_reserve_metadata_snap_mesg()
3788 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) in process_release_metadata_snap_mesg() argument
3796 r = dm_pool_release_metadata_snap(pool->pmd); in process_release_metadata_snap_mesg()
3817 struct pool *pool = pt->pool; in pool_message() local
3819 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) { in pool_message()
3821 dm_device_name(pool->pool_md)); in pool_message()
3826 r = process_create_thin_mesg(argc, argv, pool); in pool_message()
3829 r = process_create_snap_mesg(argc, argv, pool); in pool_message()
3832 r = process_delete_mesg(argc, argv, pool); in pool_message()
3835 r = process_set_transaction_id_mesg(argc, argv, pool); in pool_message()
3838 r = process_reserve_metadata_snap_mesg(argc, argv, pool); in pool_message()
3841 r = process_release_metadata_snap_mesg(argc, argv, pool); in pool_message()
3847 (void) commit(pool); in pool_message()
3897 struct pool *pool = pt->pool; in pool_status() local
3901 if (get_pool_mode(pool) == PM_FAIL) { in pool_status()
3908 (void) commit(pool); in pool_status()
3910 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); in pool_status()
3913 dm_device_name(pool->pool_md), r); in pool_status()
3917 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); in pool_status()
3920 dm_device_name(pool->pool_md), r); in pool_status()
3924 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); in pool_status()
3927 dm_device_name(pool->pool_md), r); in pool_status()
3931 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); in pool_status()
3934 dm_device_name(pool->pool_md), r); in pool_status()
3938 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); in pool_status()
3941 dm_device_name(pool->pool_md), r); in pool_status()
3945 r = dm_pool_get_metadata_snap(pool->pmd, &held_root); in pool_status()
3948 dm_device_name(pool->pool_md), r); in pool_status()
3964 mode = get_pool_mode(pool); in pool_status()
3972 if (!pool->pf.discard_enabled) in pool_status()
3974 else if (pool->pf.discard_passdown) in pool_status()
3979 if (pool->pf.error_if_no_space) in pool_status()
3984 if (dm_pool_metadata_needs_check(pool->pmd)) in pool_status()
3997 (unsigned long)pool->sectors_per_block, in pool_status()
4023 struct pool *pool = pt->pool; in pool_io_hints() local
4035 if (limits->max_sectors < pool->sectors_per_block) { in pool_io_hints()
4036 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) { in pool_io_hints()
4047 if (io_opt_sectors < pool->sectors_per_block || in pool_io_hints()
4048 !is_factor(io_opt_sectors, pool->sectors_per_block)) { in pool_io_hints()
4049 if (is_factor(pool->sectors_per_block, limits->max_sectors)) in pool_io_hints()
4052 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
4053 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
4116 spin_lock_irq(&tc->pool->lock); in thin_dtr()
4118 spin_unlock_irq(&tc->pool->lock); in thin_dtr()
4126 __pool_dec(tc->pool); in thin_dtr()
4211 tc->pool = __pool_table_lookup(pool_md); in thin_ctr()
4212 if (!tc->pool) { in thin_ctr()
4217 __pool_inc(tc->pool); in thin_ctr()
4219 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_ctr()
4225 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); in thin_ctr()
4231 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); in thin_ctr()
4241 if (tc->pool->pf.discard_enabled) { in thin_ctr()
4248 spin_lock_irq(&tc->pool->lock); in thin_ctr()
4249 if (tc->pool->suspended) { in thin_ctr()
4250 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4258 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); in thin_ctr()
4259 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4275 __pool_dec(tc->pool); in thin_ctr()
4305 struct pool *pool = h->tc->pool; in thin_endio() local
4311 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
4316 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
4323 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
4325 list_add_tail(&m->list, &pool->prepared_discards); in thin_endio()
4326 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
4327 wake_worker(pool); in thin_endio()
4378 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_status()
4400 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); in thin_status()
4403 tc->pool->sectors_per_block) - 1); in thin_status()
4433 struct pool *pool = tc->pool; in thin_iterate_devices() local
4439 if (!pool->ti) in thin_iterate_devices()
4442 blocks = pool->ti->len; in thin_iterate_devices()
4443 (void) sector_div(blocks, pool->sectors_per_block); in thin_iterate_devices()
4445 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); in thin_iterate_devices()
4453 struct pool *pool = tc->pool; in thin_io_hints() local
4455 if (!pool->pf.discard_enabled) in thin_io_hints()
4458 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; in thin_io_hints()