Lines matching references to ub (the struct ublk_device pointer) in drivers/block/ublk_drv.c

201 static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)  in ublk_dev_is_user_copy()  argument
203 return ub->dev_info.flags & UBLK_F_USER_COPY; in ublk_dev_is_user_copy()
206 static inline bool ublk_dev_is_zoned(const struct ublk_device *ub) in ublk_dev_is_zoned() argument
208 return ub->dev_info.flags & UBLK_F_ZONED; in ublk_dev_is_zoned()
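The two helpers above (lines 201-208) are plain bit tests on the feature flags negotiated at ADD_DEV time. A minimal user-space analogue, assuming only the flag and field names from the UAPI header <linux/ublk_cmd.h>; the ublk_dev_stub wrapper below is a stand-in for illustration, not the kernel's real struct ublk_device layout:

	#include <stdbool.h>
	#include <linux/ublk_cmd.h>	/* UBLK_F_*, struct ublksrv_ctrl_dev_info */

	/* Stand-in for the kernel's struct ublk_device; only dev_info matters here. */
	struct ublk_dev_stub {
		struct ublksrv_ctrl_dev_info dev_info;	/* mirrors ub->dev_info */
	};

	/* Mirrors ublk_dev_is_user_copy(): the daemon moves I/O data itself via
	 * pread()/pwrite() on the char device instead of pre-registered buffers. */
	static inline bool dev_is_user_copy(const struct ublk_dev_stub *ub)
	{
		return ub->dev_info.flags & UBLK_F_USER_COPY;
	}

	/* Mirrors ublk_dev_is_zoned(): the device exposes a zoned block model. */
	static inline bool dev_is_zoned(const struct ublk_dev_stub *ub)
	{
		return ub->dev_info.flags & UBLK_F_ZONED;
	}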
218 static int ublk_get_nr_zones(const struct ublk_device *ub) in ublk_get_nr_zones() argument
220 const struct ublk_param_basic *p = &ub->params.basic; in ublk_get_nr_zones()
226 static int ublk_revalidate_disk_zones(struct ublk_device *ub) in ublk_revalidate_disk_zones() argument
228 return blk_revalidate_disk_zones(ub->ub_disk, NULL); in ublk_revalidate_disk_zones()
231 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) in ublk_dev_param_zoned_validate() argument
233 const struct ublk_param_zoned *p = &ub->params.zoned; in ublk_dev_param_zoned_validate()
236 if (!ublk_dev_is_zoned(ub)) in ublk_dev_param_zoned_validate()
242 nr_zones = ublk_get_nr_zones(ub); in ublk_dev_param_zoned_validate()
253 static int ublk_dev_param_zoned_apply(struct ublk_device *ub) in ublk_dev_param_zoned_apply() argument
255 const struct ublk_param_zoned *p = &ub->params.zoned; in ublk_dev_param_zoned_apply()
257 disk_set_zoned(ub->ub_disk, BLK_ZONED_HM); in ublk_dev_param_zoned_apply()
258 blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue); in ublk_dev_param_zoned_apply()
259 blk_queue_required_elevator_features(ub->ub_disk->queue, in ublk_dev_param_zoned_apply()
261 disk_set_max_active_zones(ub->ub_disk, p->max_active_zones); in ublk_dev_param_zoned_apply()
262 disk_set_max_open_zones(ub->ub_disk, p->max_open_zones); in ublk_dev_param_zoned_apply()
263 blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors); in ublk_dev_param_zoned_apply()
265 ub->ub_disk->nr_zones = ublk_get_nr_zones(ub); in ublk_dev_param_zoned_apply()
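ublk_get_nr_zones() (line 218) derives the zone count from the basic parameters, and ublk_dev_param_zoned_apply() stores the result in ub_disk->nr_zones (line 265). A stand-alone sketch of that arithmetic, assuming the count is simply the device size divided by the zone size, with both values in 512-byte sectors as in struct ublk_param_basic:

	#include <stdint.h>

	/* One zone per chunk_sectors-sized region of the dev_sectors-long device.
	 * chunk_sectors (the zone size) must be non-zero; ublk_validate_params()
	 * rejects a zoned device with chunk_sectors == 0 (line 554). */
	static uint32_t nr_zones_from_basic(uint64_t dev_sectors, uint32_t chunk_sectors)
	{
		return (uint32_t)(dev_sectors / chunk_sectors);
	}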
301 struct ublk_device *ub = disk->private_data; in ublk_report_zones() local
310 nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone, in ublk_report_zones()
313 buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length); in ublk_report_zones()
434 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) in ublk_dev_param_zoned_validate() argument
439 static int ublk_dev_param_zoned_apply(struct ublk_device *ub) in ublk_dev_param_zoned_apply() argument
444 static int ublk_revalidate_disk_zones(struct ublk_device *ub) in ublk_revalidate_disk_zones() argument
499 static void ublk_dev_param_basic_apply(struct ublk_device *ub) in ublk_dev_param_basic_apply() argument
501 struct request_queue *q = ub->ub_disk->queue; in ublk_dev_param_basic_apply()
502 const struct ublk_param_basic *p = &ub->params.basic; in ublk_dev_param_basic_apply()
521 set_disk_ro(ub->ub_disk, true); in ublk_dev_param_basic_apply()
523 set_capacity(ub->ub_disk, p->dev_sectors); in ublk_dev_param_basic_apply()
526 static void ublk_dev_param_discard_apply(struct ublk_device *ub) in ublk_dev_param_discard_apply() argument
528 struct request_queue *q = ub->ub_disk->queue; in ublk_dev_param_discard_apply()
529 const struct ublk_param_discard *p = &ub->params.discard; in ublk_dev_param_discard_apply()
539 static int ublk_validate_params(const struct ublk_device *ub) in ublk_validate_params() argument
542 if (ub->params.types & UBLK_PARAM_TYPE_BASIC) { in ublk_validate_params()
543 const struct ublk_param_basic *p = &ub->params.basic; in ublk_validate_params()
551 if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9)) in ublk_validate_params()
554 if (ublk_dev_is_zoned(ub) && !p->chunk_sectors) in ublk_validate_params()
559 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) { in ublk_validate_params()
560 const struct ublk_param_discard *p = &ub->params.discard; in ublk_validate_params()
571 if (ub->params.types & UBLK_PARAM_TYPE_DEVT) in ublk_validate_params()
574 if (ub->params.types & UBLK_PARAM_TYPE_ZONED) in ublk_validate_params()
575 return ublk_dev_param_zoned_validate(ub); in ublk_validate_params()
576 else if (ublk_dev_is_zoned(ub)) in ublk_validate_params()
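The checks visible in ublk_validate_params() (lines 539-578) cap max_sectors by the per-I/O buffer size negotiated in dev_info, demand a zone size for zoned devices, and reject attempts to set the driver-owned DEVT parameters. A condensed user-space sketch of those rules, using only names from <linux/ublk_cmd.h>; the remaining checks (block size shifts, discard granularity, zoned limits) are elided:

	#include <errno.h>
	#include <stdbool.h>
	#include <linux/ublk_cmd.h>	/* struct ublk_params, UBLK_PARAM_TYPE_* */

	static int validate_params_sketch(const struct ublk_params *p,
					  __u32 max_io_buf_bytes, bool zoned)
	{
		if (p->types & UBLK_PARAM_TYPE_BASIC) {
			/* one request must fit the daemon's per-I/O buffer (line 551) */
			if (p->basic.max_sectors > (max_io_buf_bytes >> 9))
				return -EINVAL;
			/* a zoned device needs a zone size in 512-byte sectors (line 554) */
			if (zoned && !p->basic.chunk_sectors)
				return -EINVAL;
		}

		/* char/disk device numbers are filled in by the driver, never set
		 * by the server (line 571) */
		if (p->types & UBLK_PARAM_TYPE_DEVT)
			return -EINVAL;

		return 0;
	}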
582 static int ublk_apply_params(struct ublk_device *ub) in ublk_apply_params() argument
584 if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC)) in ublk_apply_params()
587 ublk_dev_param_basic_apply(ub); in ublk_apply_params()
589 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) in ublk_apply_params()
590 ublk_dev_param_discard_apply(ub); in ublk_apply_params()
592 if (ub->params.types & UBLK_PARAM_TYPE_ZONED) in ublk_apply_params()
593 return ublk_dev_param_zoned_apply(ub); in ublk_apply_params()
651 static struct ublk_device *ublk_get_device(struct ublk_device *ub) in ublk_get_device() argument
653 if (kobject_get_unless_zero(&ub->cdev_dev.kobj)) in ublk_get_device()
654 return ub; in ublk_get_device()
658 static void ublk_put_device(struct ublk_device *ub) in ublk_put_device() argument
660 put_device(&ub->cdev_dev); in ublk_put_device()
681 static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id) in ublk_queue_cmd_buf() argument
683 return ublk_get_queue(ub, q_id)->io_cmd_buf; in ublk_queue_cmd_buf()
686 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id) in ublk_queue_cmd_buf_size() argument
688 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_queue_cmd_buf_size()
707 static inline bool ublk_can_use_recovery(struct ublk_device *ub) in ublk_can_use_recovery() argument
709 return ub->dev_info.flags & UBLK_F_USER_RECOVERY; in ublk_can_use_recovery()
714 struct ublk_device *ub = disk->private_data; in ublk_free_disk() local
716 clear_bit(UB_STATE_USED, &ub->state); in ublk_free_disk()
717 put_device(&ub->cdev_dev); in ublk_free_disk()
734 struct ublk_device *ub = disk->private_data; in ublk_open() local
748 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) { in ublk_open()
753 if (curr_uid != ub->dev_info.owner_uid || curr_gid != in ublk_open()
754 ub->dev_info.owner_gid) in ublk_open()
1323 struct ublk_device *ub = driver_data; in ublk_init_hctx() local
1324 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num); in ublk_init_hctx()
1338 struct ublk_device *ub = container_of(inode->i_cdev, in ublk_ch_open() local
1341 if (test_and_set_bit(UB_STATE_OPEN, &ub->state)) in ublk_ch_open()
1343 filp->private_data = ub; in ublk_ch_open()
1349 struct ublk_device *ub = filp->private_data; in ublk_ch_release() local
1351 clear_bit(UB_STATE_OPEN, &ub->state); in ublk_ch_release()
1358 struct ublk_device *ub = filp->private_data; in ublk_ch_mmap() local
1364 spin_lock(&ub->mm_lock); in ublk_ch_mmap()
1365 if (!ub->mm) in ublk_ch_mmap()
1366 ub->mm = current->mm; in ublk_ch_mmap()
1367 if (current->mm != ub->mm) in ublk_ch_mmap()
1369 spin_unlock(&ub->mm_lock); in ublk_ch_mmap()
1377 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz; in ublk_ch_mmap()
1386 if (sz != ublk_queue_cmd_buf_size(ub, q_id)) in ublk_ch_mmap()
1389 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT; in ublk_ch_mmap()
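ublk_ch_mmap() (lines 1358-1389) only lets the daemon map the per-queue command buffers: all mappings must come from one mm, the mmap offset selects the queue, and the length must equal ublk_queue_cmd_buf_size() (line 1386). A user-space sketch of that mapping; the per-queue stride is assumed to match the maximum-depth descriptor array the driver reserves per queue, so treat MAX_QUEUE_DEPTH_ASSUMED as an assumption of this sketch rather than a UAPI constant:

	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/ublk_cmd.h>	/* UBLKSRV_CMD_BUF_OFFSET, struct ublksrv_io_desc */

	/* Assumption: 4096 mirrors the driver's maximum queue depth, which sizes
	 * the per-queue slot in the mmap offset space. */
	#define MAX_QUEUE_DEPTH_ASSUMED	4096

	/* Map queue q_id's command buffer from /dev/ublkcN (fd). The returned
	 * array holds queue_depth struct ublksrv_io_desc entries that the driver
	 * fills in before completing a FETCH_REQ command. */
	static struct ublksrv_io_desc *map_cmd_buf(int fd, int q_id, int queue_depth)
	{
		size_t stride = MAX_QUEUE_DEPTH_ASSUMED * sizeof(struct ublksrv_io_desc);
		off_t off = UBLKSRV_CMD_BUF_OFFSET + (off_t)q_id * stride;
		long pg = sysconf(_SC_PAGESIZE);
		size_t len = queue_depth * sizeof(struct ublksrv_io_desc);
		void *p;

		/* the kernel compares against ublk_queue_cmd_buf_size(), which is
		 * this size rounded up to a full page (line 1386) */
		len = (len + pg - 1) & ~((size_t)pg - 1);

		p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, off);
		return p == MAP_FAILED ? NULL : (struct ublksrv_io_desc *)p;
	}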
1393 static void ublk_commit_completion(struct ublk_device *ub, in ublk_commit_completion() argument
1397 struct ublk_queue *ubq = ublk_get_queue(ub, qid); in ublk_commit_completion()
1406 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag); in ublk_commit_completion()
1422 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_abort_queue() argument
1426 if (!ublk_get_device(ub)) in ublk_abort_queue()
1439 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i); in ublk_abort_queue()
1444 ublk_put_device(ub); in ublk_abort_queue()
1449 struct ublk_device *ub = in ublk_daemon_monitor_work() local
1453 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { in ublk_daemon_monitor_work()
1454 struct ublk_queue *ubq = ublk_get_queue(ub, i); in ublk_daemon_monitor_work()
1458 schedule_work(&ub->quiesce_work); in ublk_daemon_monitor_work()
1460 schedule_work(&ub->stop_work); in ublk_daemon_monitor_work()
1463 ublk_abort_queue(ub, ubq); in ublk_daemon_monitor_work()
1474 if (ub->dev_info.state == UBLK_S_DEV_LIVE) in ublk_daemon_monitor_work()
1475 schedule_delayed_work(&ub->monitor_work, in ublk_daemon_monitor_work()
1509 static void ublk_cancel_dev(struct ublk_device *ub) in ublk_cancel_dev() argument
1513 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) in ublk_cancel_dev()
1514 ublk_cancel_queue(ublk_get_queue(ub, i)); in ublk_cancel_dev()
1528 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub) in ublk_wait_tagset_rqs_idle() argument
1532 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue)); in ublk_wait_tagset_rqs_idle()
1535 blk_mq_tagset_busy_iter(&ub->tag_set, in ublk_wait_tagset_rqs_idle()
1543 static void __ublk_quiesce_dev(struct ublk_device *ub) in __ublk_quiesce_dev() argument
1546 __func__, ub->dev_info.dev_id, in __ublk_quiesce_dev()
1547 ub->dev_info.state == UBLK_S_DEV_LIVE ? in __ublk_quiesce_dev()
1549 blk_mq_quiesce_queue(ub->ub_disk->queue); in __ublk_quiesce_dev()
1550 ublk_wait_tagset_rqs_idle(ub); in __ublk_quiesce_dev()
1551 ub->dev_info.state = UBLK_S_DEV_QUIESCED; in __ublk_quiesce_dev()
1560 cancel_delayed_work_sync(&ub->monitor_work); in __ublk_quiesce_dev()
1565 struct ublk_device *ub = in ublk_quiesce_work_fn() local
1568 mutex_lock(&ub->mutex); in ublk_quiesce_work_fn()
1569 if (ub->dev_info.state != UBLK_S_DEV_LIVE) in ublk_quiesce_work_fn()
1571 __ublk_quiesce_dev(ub); in ublk_quiesce_work_fn()
1573 mutex_unlock(&ub->mutex); in ublk_quiesce_work_fn()
1574 ublk_cancel_dev(ub); in ublk_quiesce_work_fn()
1577 static void ublk_unquiesce_dev(struct ublk_device *ub) in ublk_unquiesce_dev() argument
1582 __func__, ub->dev_info.dev_id, in ublk_unquiesce_dev()
1583 ub->dev_info.state == UBLK_S_DEV_LIVE ? in ublk_unquiesce_dev()
1590 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) in ublk_unquiesce_dev()
1591 ublk_get_queue(ub, i)->force_abort = true; in ublk_unquiesce_dev()
1593 blk_mq_unquiesce_queue(ub->ub_disk->queue); in ublk_unquiesce_dev()
1595 blk_mq_kick_requeue_list(ub->ub_disk->queue); in ublk_unquiesce_dev()
1598 static void ublk_stop_dev(struct ublk_device *ub) in ublk_stop_dev() argument
1600 mutex_lock(&ub->mutex); in ublk_stop_dev()
1601 if (ub->dev_info.state == UBLK_S_DEV_DEAD) in ublk_stop_dev()
1603 if (ublk_can_use_recovery(ub)) { in ublk_stop_dev()
1604 if (ub->dev_info.state == UBLK_S_DEV_LIVE) in ublk_stop_dev()
1605 __ublk_quiesce_dev(ub); in ublk_stop_dev()
1606 ublk_unquiesce_dev(ub); in ublk_stop_dev()
1608 del_gendisk(ub->ub_disk); in ublk_stop_dev()
1609 ub->dev_info.state = UBLK_S_DEV_DEAD; in ublk_stop_dev()
1610 ub->dev_info.ublksrv_pid = -1; in ublk_stop_dev()
1611 put_disk(ub->ub_disk); in ublk_stop_dev()
1612 ub->ub_disk = NULL; in ublk_stop_dev()
1614 mutex_unlock(&ub->mutex); in ublk_stop_dev()
1615 ublk_cancel_dev(ub); in ublk_stop_dev()
1616 cancel_delayed_work_sync(&ub->monitor_work); in ublk_stop_dev()
1620 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_mark_io_ready() argument
1622 mutex_lock(&ub->mutex); in ublk_mark_io_ready()
1627 ub->nr_queues_ready++; in ublk_mark_io_ready()
1630 ub->nr_privileged_daemon++; in ublk_mark_io_ready()
1632 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) in ublk_mark_io_ready()
1633 complete_all(&ub->completion); in ublk_mark_io_ready()
1634 mutex_unlock(&ub->mutex); in ublk_mark_io_ready()
1637 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id, in ublk_handle_need_get_data() argument
1640 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_handle_need_get_data()
1641 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag); in ublk_handle_need_get_data()
1671 struct ublk_device *ub = cmd->file->private_data; in __ublk_ch_uring_cmd() local
1683 if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues) in __ublk_ch_uring_cmd()
1686 ubq = ublk_get_queue(ub, ub_cmd->q_id); in __ublk_ch_uring_cmd()
1745 ublk_mark_io_ready(ub, ubq); in __ublk_ch_uring_cmd()
1748 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag); in __ublk_ch_uring_cmd()
1771 ublk_commit_completion(ub, ub_cmd); in __ublk_ch_uring_cmd()
1777 ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag); in __ublk_ch_uring_cmd()
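The per-I/O commands handled in __ublk_ch_uring_cmd() (lines 1671-1777) arrive as IORING_OP_URING_CMD SQEs on the char device, carrying a 16-byte struct ublksrv_io_cmd in the SQE's command area: q_id and tag address the request, and for COMMIT_AND_FETCH the result field carries the completion status. A minimal liburing-based sketch of queueing one FETCH_REQ; the SQE wiring follows what a ublk server commonly does and is a sketch, not the only valid layout. The ioctl-encoded opcode is used because the driver forces UBLK_F_CMD_IOCTL_ENCODE at ADD_DEV time (line 2350):

	#include <stdint.h>
	#include <string.h>
	#include <liburing.h>
	#include <linux/ublk_cmd.h>	/* UBLK_U_IO_FETCH_REQ, struct ublksrv_io_cmd */

	/* Queue one FETCH_REQ for (q_id, tag) on the ublk char device cdev_fd.
	 * buf is the daemon's data buffer for this tag; it is left NULL when the
	 * device uses UBLK_F_USER_COPY and data moves via pread()/pwrite(). */
	static int queue_fetch_req(struct io_uring *ring, int cdev_fd,
				   __u16 q_id, __u16 tag, void *buf)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
		struct ublksrv_io_cmd *io;

		if (!sqe)
			return -1;

		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_URING_CMD;
		sqe->fd = cdev_fd;			/* /dev/ublkcN */
		sqe->cmd_op = UBLK_U_IO_FETCH_REQ;	/* ioctl-encoded opcode */
		sqe->user_data = ((__u64)q_id << 16) | tag;	/* daemon's own CQE key */

		io = (struct ublksrv_io_cmd *)sqe->cmd;	/* 16-byte payload area */
		io->q_id = q_id;
		io->tag = tag;
		io->addr = (__u64)(uintptr_t)buf;

		return io_uring_submit(ring);
	}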
1791 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, in __ublk_check_and_get_req() argument
1799 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); in __ublk_check_and_get_req()
1858 struct ublk_device *ub = iocb->ki_filp->private_data; in ublk_check_and_get_req() local
1864 if (!ub) in ublk_check_and_get_req()
1870 if (ub->dev_info.state == UBLK_S_DEV_DEAD) in ublk_check_and_get_req()
1877 if (q_id >= ub->dev_info.nr_hw_queues) in ublk_check_and_get_req()
1880 ubq = ublk_get_queue(ub, q_id); in ublk_check_and_get_req()
1887 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off); in ublk_check_and_get_req()
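With UBLK_F_USER_COPY, request payloads move via pread()/pwrite() on the char device, and ublk_check_and_get_req() (line 1858) recovers the queue, tag and intra-request byte offset from the file position before looking up the request (line 1887). A sketch of building that position, assuming the UBLK_QID_OFF / UBLK_TAG_OFF bit layout and the UBLKSRV_IO_BUF_OFFSET base from <linux/ublk_cmd.h>; verify the base against the header of the kernel in use:

	#include <linux/ublk_cmd.h>	/* UBLK_QID_OFF, UBLK_TAG_OFF, UBLKSRV_IO_BUF_OFFSET */

	/* File position for pread()/pwrite() on /dev/ublkcN addressing byte
	 * byte_off of the request identified by (q_id, tag). Assumption: the
	 * position is the bit-packed (q_id, tag, offset) value based at
	 * UBLKSRV_IO_BUF_OFFSET, matching what ublk_check_and_get_req() decodes. */
	static __u64 user_copy_pos(__u16 q_id, __u16 tag, __u32 byte_off)
	{
		return UBLKSRV_IO_BUF_OFFSET +
			(((__u64)q_id << UBLK_QID_OFF) |
			 ((__u64)tag << UBLK_TAG_OFF) |
			 (__u64)byte_off);
	}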
1951 static void ublk_deinit_queue(struct ublk_device *ub, int q_id) in ublk_deinit_queue() argument
1953 int size = ublk_queue_cmd_buf_size(ub, q_id); in ublk_deinit_queue()
1954 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_deinit_queue()
1962 static int ublk_init_queue(struct ublk_device *ub, int q_id) in ublk_init_queue() argument
1964 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_init_queue()
1970 ubq->flags = ub->dev_info.flags; in ublk_init_queue()
1972 ubq->q_depth = ub->dev_info.queue_depth; in ublk_init_queue()
1973 size = ublk_queue_cmd_buf_size(ub, q_id); in ublk_init_queue()
1980 ubq->dev = ub; in ublk_init_queue()
1984 static void ublk_deinit_queues(struct ublk_device *ub) in ublk_deinit_queues() argument
1986 int nr_queues = ub->dev_info.nr_hw_queues; in ublk_deinit_queues()
1989 if (!ub->__queues) in ublk_deinit_queues()
1993 ublk_deinit_queue(ub, i); in ublk_deinit_queues()
1994 kfree(ub->__queues); in ublk_deinit_queues()
1997 static int ublk_init_queues(struct ublk_device *ub) in ublk_init_queues() argument
1999 int nr_queues = ub->dev_info.nr_hw_queues; in ublk_init_queues()
2000 int depth = ub->dev_info.queue_depth; in ublk_init_queues()
2004 ub->queue_size = ubq_size; in ublk_init_queues()
2005 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL); in ublk_init_queues()
2006 if (!ub->__queues) in ublk_init_queues()
2010 if (ublk_init_queue(ub, i)) in ublk_init_queues()
2014 init_completion(&ub->completion); in ublk_init_queues()
2018 ublk_deinit_queues(ub); in ublk_init_queues()
2022 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx) in ublk_alloc_dev_number() argument
2030 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT); in ublk_alloc_dev_number()
2034 err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT); in ublk_alloc_dev_number()
2039 ub->ub_number = err; in ublk_alloc_dev_number()
2044 static void ublk_free_dev_number(struct ublk_device *ub) in ublk_free_dev_number() argument
2047 idr_remove(&ublk_index_idr, ub->ub_number); in ublk_free_dev_number()
2054 struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev); in ublk_cdev_rel() local
2056 blk_mq_free_tag_set(&ub->tag_set); in ublk_cdev_rel()
2057 ublk_deinit_queues(ub); in ublk_cdev_rel()
2058 ublk_free_dev_number(ub); in ublk_cdev_rel()
2059 mutex_destroy(&ub->mutex); in ublk_cdev_rel()
2060 kfree(ub); in ublk_cdev_rel()
2063 static int ublk_add_chdev(struct ublk_device *ub) in ublk_add_chdev() argument
2065 struct device *dev = &ub->cdev_dev; in ublk_add_chdev()
2066 int minor = ub->ub_number; in ublk_add_chdev()
2079 cdev_init(&ub->cdev, &ublk_ch_fops); in ublk_add_chdev()
2080 ret = cdev_device_add(&ub->cdev, dev); in ublk_add_chdev()
2093 struct ublk_device *ub = in ublk_stop_work_fn() local
2096 ublk_stop_dev(ub); in ublk_stop_work_fn()
2100 static void ublk_align_max_io_size(struct ublk_device *ub) in ublk_align_max_io_size() argument
2102 unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes; in ublk_align_max_io_size()
2104 ub->dev_info.max_io_buf_bytes = in ublk_align_max_io_size()
2108 static int ublk_add_tag_set(struct ublk_device *ub) in ublk_add_tag_set() argument
2110 ub->tag_set.ops = &ublk_mq_ops; in ublk_add_tag_set()
2111 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues; in ublk_add_tag_set()
2112 ub->tag_set.queue_depth = ub->dev_info.queue_depth; in ublk_add_tag_set()
2113 ub->tag_set.numa_node = NUMA_NO_NODE; in ublk_add_tag_set()
2114 ub->tag_set.cmd_size = sizeof(struct ublk_rq_data); in ublk_add_tag_set()
2115 ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in ublk_add_tag_set()
2116 ub->tag_set.driver_data = ub; in ublk_add_tag_set()
2117 return blk_mq_alloc_tag_set(&ub->tag_set); in ublk_add_tag_set()
2120 static void ublk_remove(struct ublk_device *ub) in ublk_remove() argument
2122 ublk_stop_dev(ub); in ublk_remove()
2123 cancel_work_sync(&ub->stop_work); in ublk_remove()
2124 cancel_work_sync(&ub->quiesce_work); in ublk_remove()
2125 cdev_device_del(&ub->cdev, &ub->cdev_dev); in ublk_remove()
2126 put_device(&ub->cdev_dev); in ublk_remove()
2132 struct ublk_device *ub = NULL; in ublk_get_device_from_id() local
2138 ub = idr_find(&ublk_index_idr, idx); in ublk_get_device_from_id()
2139 if (ub) in ublk_get_device_from_id()
2140 ub = ublk_get_device(ub); in ublk_get_device_from_id()
2143 return ub; in ublk_get_device_from_id()
2146 static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) in ublk_ctrl_start_dev() argument
2156 if (wait_for_completion_interruptible(&ub->completion) != 0) in ublk_ctrl_start_dev()
2159 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD); in ublk_ctrl_start_dev()
2161 mutex_lock(&ub->mutex); in ublk_ctrl_start_dev()
2162 if (ub->dev_info.state == UBLK_S_DEV_LIVE || in ublk_ctrl_start_dev()
2163 test_bit(UB_STATE_USED, &ub->state)) { in ublk_ctrl_start_dev()
2168 disk = blk_mq_alloc_disk(&ub->tag_set, NULL); in ublk_ctrl_start_dev()
2173 sprintf(disk->disk_name, "ublkb%d", ub->ub_number); in ublk_ctrl_start_dev()
2175 disk->private_data = ub; in ublk_ctrl_start_dev()
2177 ub->dev_info.ublksrv_pid = ublksrv_pid; in ublk_ctrl_start_dev()
2178 ub->ub_disk = disk; in ublk_ctrl_start_dev()
2180 ret = ublk_apply_params(ub); in ublk_ctrl_start_dev()
2185 if (ub->nr_privileged_daemon != ub->nr_queues_ready) in ublk_ctrl_start_dev()
2188 get_device(&ub->cdev_dev); in ublk_ctrl_start_dev()
2189 ub->dev_info.state = UBLK_S_DEV_LIVE; in ublk_ctrl_start_dev()
2191 if (ublk_dev_is_zoned(ub)) { in ublk_ctrl_start_dev()
2192 ret = ublk_revalidate_disk_zones(ub); in ublk_ctrl_start_dev()
2201 set_bit(UB_STATE_USED, &ub->state); in ublk_ctrl_start_dev()
2205 ub->dev_info.state = UBLK_S_DEV_DEAD; in ublk_ctrl_start_dev()
2206 ublk_put_device(ub); in ublk_ctrl_start_dev()
2212 mutex_unlock(&ub->mutex); in ublk_ctrl_start_dev()
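ublk_ctrl_start_dev() (line 2146) blocks until every queue has posted its FETCH_REQ commands (the completion waited on at line 2156), then allocates the gendisk, applies the staged parameters and flips the state to UBLK_S_DEV_LIVE. From the server side this is the last step of bring-up; a hedged sketch of issuing START_DEV on /dev/ublk-control, assuming the ioctl-encoded UBLK_U_CMD_START_DEV opcode, the struct ublksrv_ctrl_cmd layout from <linux/ublk_cmd.h>, and a ring created with IORING_SETUP_SQE128, since the control payload does not fit the regular 16-byte command area:

	#include <string.h>
	#include <unistd.h>
	#include <liburing.h>
	#include <linux/ublk_cmd.h>	/* UBLK_U_CMD_START_DEV, struct ublksrv_ctrl_cmd */

	/* Ask the driver to bring up /dev/ublkb<dev_id>. Only useful after every
	 * queue's FETCH_REQ commands are in flight; otherwise the driver blocks
	 * in wait_for_completion_interruptible() (line 2156). */
	static int start_dev(struct io_uring *ring, int ctrl_fd, __u32 dev_id)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
		struct ublksrv_ctrl_cmd *c;
		struct io_uring_cqe *cqe;
		int res;

		if (!sqe)
			return -1;

		memset(sqe, 0, sizeof(*sqe) * 2);	/* SQE128 ring: 128-byte slots */
		sqe->opcode = IORING_OP_URING_CMD;
		sqe->fd = ctrl_fd;			/* /dev/ublk-control */
		sqe->cmd_op = UBLK_U_CMD_START_DEV;

		c = (struct ublksrv_ctrl_cmd *)sqe->cmd;
		c->dev_id = dev_id;
		c->data[0] = getpid();			/* becomes dev_info.ublksrv_pid */

		io_uring_submit(ring);
		if (io_uring_wait_cqe(ring, &cqe))
			return -1;
		res = cqe->res;				/* 0 on success */
		io_uring_cqe_seen(ring, cqe);
		return res;
	}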
2216 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub, in ublk_ctrl_get_queue_affinity() argument
2235 if (queue >= ub->dev_info.nr_hw_queues) in ublk_ctrl_get_queue_affinity()
2242 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue) in ublk_ctrl_get_queue_affinity()
2273 struct ublk_device *ub; in ublk_ctrl_add_dev() local
2324 ub = kzalloc(sizeof(*ub), GFP_KERNEL); in ublk_ctrl_add_dev()
2325 if (!ub) in ublk_ctrl_add_dev()
2327 mutex_init(&ub->mutex); in ublk_ctrl_add_dev()
2328 spin_lock_init(&ub->mm_lock); in ublk_ctrl_add_dev()
2329 INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn); in ublk_ctrl_add_dev()
2330 INIT_WORK(&ub->stop_work, ublk_stop_work_fn); in ublk_ctrl_add_dev()
2331 INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work); in ublk_ctrl_add_dev()
2333 ret = ublk_alloc_dev_number(ub, header->dev_id); in ublk_ctrl_add_dev()
2337 memcpy(&ub->dev_info, &info, sizeof(info)); in ublk_ctrl_add_dev()
2340 ub->dev_info.dev_id = ub->ub_number; in ublk_ctrl_add_dev()
2348 ub->dev_info.flags &= UBLK_F_ALL; in ublk_ctrl_add_dev()
2350 ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE | in ublk_ctrl_add_dev()
2354 if (ublk_dev_is_user_copy(ub)) in ublk_ctrl_add_dev()
2355 ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA; in ublk_ctrl_add_dev()
2358 if (ublk_dev_is_zoned(ub) && in ublk_ctrl_add_dev()
2359 (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) { in ublk_ctrl_add_dev()
2365 ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY; in ublk_ctrl_add_dev()
2367 ub->dev_info.nr_hw_queues = min_t(unsigned int, in ublk_ctrl_add_dev()
2368 ub->dev_info.nr_hw_queues, nr_cpu_ids); in ublk_ctrl_add_dev()
2369 ublk_align_max_io_size(ub); in ublk_ctrl_add_dev()
2371 ret = ublk_init_queues(ub); in ublk_ctrl_add_dev()
2375 ret = ublk_add_tag_set(ub); in ublk_ctrl_add_dev()
2380 if (copy_to_user(argp, &ub->dev_info, sizeof(info))) in ublk_ctrl_add_dev()
2387 ret = ublk_add_chdev(ub); in ublk_ctrl_add_dev()
2391 blk_mq_free_tag_set(&ub->tag_set); in ublk_ctrl_add_dev()
2393 ublk_deinit_queues(ub); in ublk_ctrl_add_dev()
2395 ublk_free_dev_number(ub); in ublk_ctrl_add_dev()
2397 mutex_destroy(&ub->mutex); in ublk_ctrl_add_dev()
2398 kfree(ub); in ublk_ctrl_add_dev()
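ublk_ctrl_add_dev() (lines 2273-2398) copies the server-supplied ublksrv_ctrl_dev_info and sanitizes the feature flags before any resources are set up: unknown bits are masked off, UBLK_F_CMD_IOCTL_ENCODE is forced on (line 2350), user-copy devices lose UBLK_F_NEED_GET_DATA (line 2355), zoned devices require both CONFIG_BLK_DEV_ZONED and user copy (line 2358), and UBLK_F_SUPPORT_ZERO_COPY is cleared (line 2365). A plain-C restatement of that sanitization; the driver's internal UBLK_F_ALL mask is not exported, so F_KNOWN below stands in with the individual UAPI bits, and zoned_supported stands for IS_ENABLED(CONFIG_BLK_DEV_ZONED):

	#include <linux/ublk_cmd.h>	/* UBLK_F_* feature flags */

	#define F_KNOWN	(UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_URING_CMD_COMP_IN_TASK | \
			 UBLK_F_NEED_GET_DATA | UBLK_F_USER_RECOVERY | \
			 UBLK_F_USER_RECOVERY_REISSUE | UBLK_F_UNPRIVILEGED_DEV | \
			 UBLK_F_CMD_IOCTL_ENCODE | UBLK_F_USER_COPY | UBLK_F_ZONED)

	/* Returns the cleaned flags, or (__u64)-1 for a combination the driver
	 * rejects at ADD_DEV time. */
	static __u64 sanitize_add_dev_flags(__u64 flags, int zoned_supported)
	{
		flags &= F_KNOWN;			/* drop unknown feature bits */
		flags |= UBLK_F_CMD_IOCTL_ENCODE;	/* always forced on (line 2350) */

		/* with user copy the daemon never needs the GET_DATA round trip */
		if (flags & UBLK_F_USER_COPY)
			flags &= ~UBLK_F_NEED_GET_DATA;

		/* zoned emulation needs zoned block support and user copy (line 2358) */
		if ((flags & UBLK_F_ZONED) &&
		    (!zoned_supported || !(flags & UBLK_F_USER_COPY)))
			return (__u64)-1;

		flags &= ~UBLK_F_SUPPORT_ZERO_COPY;	/* zero copy not implemented (line 2365) */
		return flags;
	}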
2417 struct ublk_device *ub = *p_ub; in ublk_ctrl_del_dev() local
2418 int idx = ub->ub_number; in ublk_ctrl_del_dev()
2425 if (!test_bit(UB_STATE_DELETED, &ub->state)) { in ublk_ctrl_del_dev()
2426 ublk_remove(ub); in ublk_ctrl_del_dev()
2427 set_bit(UB_STATE_DELETED, &ub->state); in ublk_ctrl_del_dev()
2432 ublk_put_device(ub); in ublk_ctrl_del_dev()
2463 static int ublk_ctrl_stop_dev(struct ublk_device *ub) in ublk_ctrl_stop_dev() argument
2465 ublk_stop_dev(ub); in ublk_ctrl_stop_dev()
2466 cancel_work_sync(&ub->stop_work); in ublk_ctrl_stop_dev()
2467 cancel_work_sync(&ub->quiesce_work); in ublk_ctrl_stop_dev()
2472 static int ublk_ctrl_get_dev_info(struct ublk_device *ub, in ublk_ctrl_get_dev_info() argument
2481 if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info))) in ublk_ctrl_get_dev_info()
2488 static void ublk_ctrl_fill_params_devt(struct ublk_device *ub) in ublk_ctrl_fill_params_devt() argument
2490 ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt); in ublk_ctrl_fill_params_devt()
2491 ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt); in ublk_ctrl_fill_params_devt()
2493 if (ub->ub_disk) { in ublk_ctrl_fill_params_devt()
2494 ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk)); in ublk_ctrl_fill_params_devt()
2495 ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk)); in ublk_ctrl_fill_params_devt()
2497 ub->params.devt.disk_major = 0; in ublk_ctrl_fill_params_devt()
2498 ub->params.devt.disk_minor = 0; in ublk_ctrl_fill_params_devt()
2500 ub->params.types |= UBLK_PARAM_TYPE_DEVT; in ublk_ctrl_fill_params_devt()
2503 static int ublk_ctrl_get_params(struct ublk_device *ub, in ublk_ctrl_get_params() argument
2523 mutex_lock(&ub->mutex); in ublk_ctrl_get_params()
2524 ublk_ctrl_fill_params_devt(ub); in ublk_ctrl_get_params()
2525 if (copy_to_user(argp, &ub->params, ph.len)) in ublk_ctrl_get_params()
2529 mutex_unlock(&ub->mutex); in ublk_ctrl_get_params()
2534 static int ublk_ctrl_set_params(struct ublk_device *ub, in ublk_ctrl_set_params() argument
2555 mutex_lock(&ub->mutex); in ublk_ctrl_set_params()
2556 if (ub->dev_info.state == UBLK_S_DEV_LIVE) { in ublk_ctrl_set_params()
2558 } else if (copy_from_user(&ub->params, argp, ph.len)) { in ublk_ctrl_set_params()
2562 ub->params.types &= UBLK_PARAM_TYPE_ALL; in ublk_ctrl_set_params()
2563 ret = ublk_validate_params(ub); in ublk_ctrl_set_params()
2565 ub->params.types = 0; in ublk_ctrl_set_params()
2567 mutex_unlock(&ub->mutex); in ublk_ctrl_set_params()
2572 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_queue_reinit() argument
2596 static int ublk_ctrl_start_recovery(struct ublk_device *ub, in ublk_ctrl_start_recovery() argument
2603 mutex_lock(&ub->mutex); in ublk_ctrl_start_recovery()
2604 if (!ublk_can_use_recovery(ub)) in ublk_ctrl_start_recovery()
2619 if (test_bit(UB_STATE_OPEN, &ub->state) || in ublk_ctrl_start_recovery()
2620 ub->dev_info.state != UBLK_S_DEV_QUIESCED) { in ublk_ctrl_start_recovery()
2625 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) in ublk_ctrl_start_recovery()
2626 ublk_queue_reinit(ub, ublk_get_queue(ub, i)); in ublk_ctrl_start_recovery()
2628 ub->mm = NULL; in ublk_ctrl_start_recovery()
2629 ub->nr_queues_ready = 0; in ublk_ctrl_start_recovery()
2630 ub->nr_privileged_daemon = 0; in ublk_ctrl_start_recovery()
2631 init_completion(&ub->completion); in ublk_ctrl_start_recovery()
2634 mutex_unlock(&ub->mutex); in ublk_ctrl_start_recovery()
2638 static int ublk_ctrl_end_recovery(struct ublk_device *ub, in ublk_ctrl_end_recovery() argument
2646 __func__, ub->dev_info.nr_hw_queues, header->dev_id); in ublk_ctrl_end_recovery()
2648 if (wait_for_completion_interruptible(&ub->completion)) in ublk_ctrl_end_recovery()
2652 __func__, ub->dev_info.nr_hw_queues, header->dev_id); in ublk_ctrl_end_recovery()
2654 mutex_lock(&ub->mutex); in ublk_ctrl_end_recovery()
2655 if (!ublk_can_use_recovery(ub)) in ublk_ctrl_end_recovery()
2658 if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) { in ublk_ctrl_end_recovery()
2662 ub->dev_info.ublksrv_pid = ublksrv_pid; in ublk_ctrl_end_recovery()
2665 blk_mq_unquiesce_queue(ub->ub_disk->queue); in ublk_ctrl_end_recovery()
2668 blk_mq_kick_requeue_list(ub->ub_disk->queue); in ublk_ctrl_end_recovery()
2669 ub->dev_info.state = UBLK_S_DEV_LIVE; in ublk_ctrl_end_recovery()
2670 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD); in ublk_ctrl_end_recovery()
2673 mutex_unlock(&ub->mutex); in ublk_ctrl_end_recovery()
2696 static int ublk_char_dev_permission(struct ublk_device *ub, in ublk_char_dev_permission() argument
2712 if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode)) in ublk_char_dev_permission()
2722 static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub, in ublk_ctrl_uring_cmd_permission() argument
2726 bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV; in ublk_ctrl_uring_cmd_permission()
2783 ret = ublk_char_dev_permission(ub, dev_path, mask); in ublk_ctrl_uring_cmd_permission()
2789 __func__, ub->ub_number, cmd->cmd_op, in ublk_ctrl_uring_cmd_permission()
2790 ub->dev_info.owner_uid, ub->dev_info.owner_gid, in ublk_ctrl_uring_cmd_permission()
2801 struct ublk_device *ub = NULL; in ublk_ctrl_uring_cmd() local
2824 ub = ublk_get_device_from_id(header->dev_id); in ublk_ctrl_uring_cmd()
2825 if (!ub) in ublk_ctrl_uring_cmd()
2828 ret = ublk_ctrl_uring_cmd_permission(ub, cmd); in ublk_ctrl_uring_cmd()
2835 ret = ublk_ctrl_start_dev(ub, cmd); in ublk_ctrl_uring_cmd()
2838 ret = ublk_ctrl_stop_dev(ub); in ublk_ctrl_uring_cmd()
2842 ret = ublk_ctrl_get_dev_info(ub, cmd); in ublk_ctrl_uring_cmd()
2848 ret = ublk_ctrl_del_dev(&ub); in ublk_ctrl_uring_cmd()
2851 ret = ublk_ctrl_get_queue_affinity(ub, cmd); in ublk_ctrl_uring_cmd()
2854 ret = ublk_ctrl_get_params(ub, cmd); in ublk_ctrl_uring_cmd()
2857 ret = ublk_ctrl_set_params(ub, cmd); in ublk_ctrl_uring_cmd()
2860 ret = ublk_ctrl_start_recovery(ub, cmd); in ublk_ctrl_uring_cmd()
2863 ret = ublk_ctrl_end_recovery(ub, cmd); in ublk_ctrl_uring_cmd()
2871 if (ub) in ublk_ctrl_uring_cmd()
2872 ublk_put_device(ub); in ublk_ctrl_uring_cmd()
2925 struct ublk_device *ub; in ublk_exit() local
2928 idr_for_each_entry(&ublk_index_idr, ub, id) in ublk_exit()
2929 ublk_remove(ub); in ublk_exit()