Lines matching refs:hpb — each entry shows the source line number, the matching code, and the enclosing function ("argument" marks a function parameter, "local" a local variable).

38 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
57 static int ufshpb_get_state(struct ufshpb_lu *hpb) in ufshpb_get_state() argument
59 return atomic_read(&hpb->hpb_state); in ufshpb_get_state()
62 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state) in ufshpb_set_state() argument
64 atomic_set(&hpb->hpb_state, state); in ufshpb_set_state()
85 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len) in ufshpb_is_supported_chunk() argument
87 return transfer_len <= hpb->pre_req_max_tr_len; in ufshpb_is_supported_chunk()
95 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) in ufshpb_is_pinned_region() argument
97 return hpb->lu_pinned_end != PINNED_NOT_SET && in ufshpb_is_pinned_region()
98 rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end; in ufshpb_is_pinned_region()
101 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb) in ufshpb_kick_map_work() argument
106 if (ufshpb_get_state(hpb) != HPB_PRESENT) in ufshpb_kick_map_work()
109 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_kick_map_work()
110 if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn)) in ufshpb_kick_map_work()
112 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_kick_map_work()
115 queue_work(ufshpb_wq, &hpb->map_work); in ufshpb_kick_map_work()
146 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx, in ufshpb_iterate_rgn() argument
156 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_iterate_rgn()
160 bitmap_len = hpb->entries_per_srgn; in ufshpb_iterate_rgn()
162 bitmap_len = hpb->last_srgn_entries; in ufshpb_iterate_rgn()
169 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_iterate_rgn()
175 } else if (hpb->is_hcm) { in ufshpb_iterate_rgn()
178 rgn->hpb->params.read_timeout_ms); in ufshpb_iterate_rgn()
180 rgn->hpb->params.read_timeout_expiries; in ufshpb_iterate_rgn()
183 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_iterate_rgn()
185 if (hpb->is_hcm && prev_srgn != srgn) { in ufshpb_iterate_rgn()
196 if (srgn->reads == hpb->params.activation_thld) in ufshpb_iterate_rgn()
203 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_iterate_rgn()
204 ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); in ufshpb_iterate_rgn()
205 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_iterate_rgn()
206 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_iterate_rgn()
214 if (++srgn_idx == hpb->srgns_per_rgn) { in ufshpb_iterate_rgn()
224 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, in ufshpb_test_ppn_dirty() argument
233 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_test_ppn_dirty()
237 bitmap_len = hpb->entries_per_srgn; in ufshpb_test_ppn_dirty()
239 bitmap_len = hpb->last_srgn_entries; in ufshpb_test_ppn_dirty()
250 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_test_ppn_dirty()
266 if (++srgn_idx == hpb->srgns_per_rgn) { in ufshpb_test_ppn_dirty()
283 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb, in ufshpb_fill_ppn_from_page() argument
301 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_fill_ppn_from_page()
313 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx, in ufshpb_get_pos_from_lpn() argument
318 *rgn_idx = lpn >> hpb->entries_per_rgn_shift; in ufshpb_get_pos_from_lpn()
319 rgn_offset = lpn & hpb->entries_per_rgn_mask; in ufshpb_get_pos_from_lpn()
320 *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift; in ufshpb_get_pos_from_lpn()
321 *offset = rgn_offset & hpb->entries_per_srgn_mask; in ufshpb_get_pos_from_lpn()
348 struct ufshpb_lu *hpb; in ufshpb_prep() local
358 hpb = ufshpb_get_hpb_data(cmd->device); in ufshpb_prep()
359 if (!hpb) in ufshpb_prep()
362 if (ufshpb_get_state(hpb) == HPB_INIT) in ufshpb_prep()
365 if (ufshpb_get_state(hpb) != HPB_PRESENT) { in ufshpb_prep()
366 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_prep()
382 ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); in ufshpb_prep()
383 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_prep()
388 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, in ufshpb_prep()
393 if (!ufshpb_is_supported_chunk(hpb, transfer_len)) in ufshpb_prep()
396 if (hpb->is_hcm) { in ufshpb_prep()
401 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, in ufshpb_prep()
405 if (rgn->reads > hpb->entries_per_srgn) in ufshpb_prep()
406 schedule_work(&hpb->ufshpb_normalization_work); in ufshpb_prep()
409 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_prep()
410 if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset, in ufshpb_prep()
412 hpb->stats.miss_cnt++; in ufshpb_prep()
413 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_prep()
417 err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn); in ufshpb_prep()
418 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_prep()
432 hpb->stats.hit_cnt++; in ufshpb_prep()
436 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, in ufshpb_get_req() argument
444 rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL); in ufshpb_get_req()
449 req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir, in ufshpb_get_req()
460 rq->hpb = hpb; in ufshpb_get_req()
467 kmem_cache_free(hpb->map_req_cache, rq); in ufshpb_get_req()
471 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq) in ufshpb_put_req() argument
474 kmem_cache_free(hpb->map_req_cache, rq); in ufshpb_put_req()
477 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, in ufshpb_get_map_req() argument
484 if (hpb->is_hcm && in ufshpb_get_map_req()
485 hpb->num_inflight_map_req >= hpb->params.inflight_map_req) { in ufshpb_get_map_req()
486 dev_info(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_get_map_req()
488 hpb->num_inflight_map_req, in ufshpb_get_map_req()
489 hpb->params.inflight_map_req); in ufshpb_get_map_req()
493 map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false); in ufshpb_get_map_req()
497 bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL); in ufshpb_get_map_req()
499 ufshpb_put_req(hpb, map_req); in ufshpb_get_map_req()
508 spin_lock_irqsave(&hpb->param_lock, flags); in ufshpb_get_map_req()
509 hpb->num_inflight_map_req++; in ufshpb_get_map_req()
510 spin_unlock_irqrestore(&hpb->param_lock, flags); in ufshpb_get_map_req()
515 static void ufshpb_put_map_req(struct ufshpb_lu *hpb, in ufshpb_put_map_req() argument
521 ufshpb_put_req(hpb, map_req); in ufshpb_put_map_req()
523 spin_lock_irqsave(&hpb->param_lock, flags); in ufshpb_put_map_req()
524 hpb->num_inflight_map_req--; in ufshpb_put_map_req()
525 spin_unlock_irqrestore(&hpb->param_lock, flags); in ufshpb_put_map_req()
528 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, in ufshpb_clear_dirty_bitmap() argument
532 u32 num_entries = hpb->entries_per_srgn; in ufshpb_clear_dirty_bitmap()
535 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_clear_dirty_bitmap()
542 num_entries = hpb->last_srgn_entries; in ufshpb_clear_dirty_bitmap()
546 rgn = hpb->rgn_tbl + srgn->rgn_idx; in ufshpb_clear_dirty_bitmap()
552 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, in ufshpb_update_active_info() argument
558 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_update_active_info()
564 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); in ufshpb_update_active_info()
566 hpb->stats.rcmd_active_cnt++; in ufshpb_update_active_info()
569 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) in ufshpb_update_inactive_info() argument
575 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_update_inactive_info()
581 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); in ufshpb_update_inactive_info()
583 hpb->stats.rcmd_inactive_cnt++; in ufshpb_update_inactive_info()
586 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, in ufshpb_activate_subregion() argument
598 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_activate_subregion()
605 rgn = hpb->rgn_tbl + srgn->rgn_idx; in ufshpb_activate_subregion()
608 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_activate_subregion()
621 ufshpb_put_req(umap_req->hpb, umap_req); in ufshpb_umap_req_compl_fn()
627 struct ufshpb_lu *hpb = map_req->hpb; in ufshpb_map_req_compl_fn() local
631 srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl + in ufshpb_map_req_compl_fn()
634 ufshpb_clear_dirty_bitmap(hpb, srgn); in ufshpb_map_req_compl_fn()
635 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_map_req_compl_fn()
636 ufshpb_activate_subregion(hpb, srgn); in ufshpb_map_req_compl_fn()
637 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_map_req_compl_fn()
639 ufshpb_put_map_req(map_req->hpb, map_req); in ufshpb_map_req_compl_fn()
665 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb, in ufshpb_execute_umap_req() argument
681 hpb->stats.umap_req_cnt++; in ufshpb_execute_umap_req()
684 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, in ufshpb_execute_map_req() argument
690 int mem_size = hpb->srgn_mem_size; in ufshpb_execute_map_req()
694 q = hpb->sdev_ufs_lu->request_queue; in ufshpb_execute_map_req()
695 for (i = 0; i < hpb->pages_per_srgn; i++) { in ufshpb_execute_map_req()
699 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_execute_map_req()
714 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE; in ufshpb_execute_map_req()
723 hpb->stats.map_req_cnt++; in ufshpb_execute_map_req()
727 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb, in ufshpb_get_map_ctx() argument
731 u32 num_entries = hpb->entries_per_srgn; in ufshpb_get_map_ctx()
738 mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL); in ufshpb_get_map_ctx()
743 num_entries = hpb->last_srgn_entries; in ufshpb_get_map_ctx()
749 for (i = 0; i < hpb->pages_per_srgn; i++) { in ufshpb_get_map_ctx()
764 kmem_cache_free(hpb->m_page_cache, mctx->m_page); in ufshpb_get_map_ctx()
770 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb, in ufshpb_put_map_ctx() argument
775 for (i = 0; i < hpb->pages_per_srgn; i++) in ufshpb_put_map_ctx()
779 kmem_cache_free(hpb->m_page_cache, mctx->m_page); in ufshpb_put_map_ctx()
783 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb, in ufshpb_check_srgns_issue_state() argument
798 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, in ufshpb_read_to_handler() local
800 struct victim_select_info *lru_info = &hpb->lru_info; in ufshpb_read_to_handler()
806 if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits)) in ufshpb_read_to_handler()
809 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_read_to_handler()
822 hpb->params.read_timeout_ms); in ufshpb_read_to_handler()
826 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_read_to_handler()
831 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_read_to_handler()
832 ufshpb_update_inactive_info(hpb, rgn->rgn_idx); in ufshpb_read_to_handler()
833 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_read_to_handler()
836 ufshpb_kick_map_work(hpb); in ufshpb_read_to_handler()
838 clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits); in ufshpb_read_to_handler()
840 poll = hpb->params.timeout_polling_interval_ms; in ufshpb_read_to_handler()
841 schedule_delayed_work(&hpb->ufshpb_read_to_work, in ufshpb_read_to_handler()
851 if (rgn->hpb->is_hcm) { in ufshpb_add_lru_info()
854 rgn->hpb->params.read_timeout_ms); in ufshpb_add_lru_info()
856 rgn->hpb->params.read_timeout_expiries; in ufshpb_add_lru_info()
866 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) in ufshpb_victim_lru_info() argument
868 struct victim_select_info *lru_info = &hpb->lru_info; in ufshpb_victim_lru_info()
872 if (ufshpb_check_srgns_issue_state(hpb, rgn)) in ufshpb_victim_lru_info()
879 if (hpb->is_hcm && in ufshpb_victim_lru_info()
880 rgn->reads > hpb->params.eviction_thld_exit) in ufshpb_victim_lru_info()
888 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_victim_lru_info()
903 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, in ufshpb_purge_active_subregion() argument
907 ufshpb_put_map_ctx(hpb, srgn->mctx); in ufshpb_purge_active_subregion()
913 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, in ufshpb_issue_umap_req() argument
920 umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic); in ufshpb_issue_umap_req()
924 ufshpb_execute_umap_req(hpb, umap_req, rgn); in ufshpb_issue_umap_req()
929 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb, in ufshpb_issue_umap_single_req() argument
932 return ufshpb_issue_umap_req(hpb, rgn, true); in ufshpb_issue_umap_single_req()
935 static void __ufshpb_evict_region(struct ufshpb_lu *hpb, in __ufshpb_evict_region() argument
942 lru_info = &hpb->lru_info; in __ufshpb_evict_region()
944 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx); in __ufshpb_evict_region()
949 ufshpb_purge_active_subregion(hpb, srgn); in __ufshpb_evict_region()
952 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) in ufshpb_evict_region() argument
957 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_evict_region()
959 dev_warn(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_evict_region()
966 if (ufshpb_check_srgns_issue_state(hpb, rgn)) { in ufshpb_evict_region()
971 if (hpb->is_hcm) { in ufshpb_evict_region()
972 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_evict_region()
973 ret = ufshpb_issue_umap_single_req(hpb, rgn); in ufshpb_evict_region()
974 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_evict_region()
979 __ufshpb_evict_region(hpb, rgn); in ufshpb_evict_region()
982 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_evict_region()
986 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb, in ufshpb_issue_map_req() argument
997 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_issue_map_req()
999 if (ufshpb_get_state(hpb) != HPB_PRESENT) { in ufshpb_issue_map_req()
1000 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_issue_map_req()
1026 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_issue_map_req()
1029 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); in ufshpb_issue_map_req()
1031 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_issue_map_req()
1039 map_req = ufshpb_get_map_req(hpb, srgn); in ufshpb_issue_map_req()
1044 ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last); in ufshpb_issue_map_req()
1046 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_issue_map_req()
1054 ufshpb_put_map_req(hpb, map_req); in ufshpb_issue_map_req()
1056 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_issue_map_req()
1059 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_issue_map_req()
1063 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) in ufshpb_add_region() argument
1066 struct victim_select_info *lru_info = &hpb->lru_info; in ufshpb_add_region()
1070 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_add_region()
1096 if (hpb->is_hcm && in ufshpb_add_region()
1097 rgn->reads < hpb->params.eviction_thld_enter) { in ufshpb_add_region()
1102 victim_rgn = ufshpb_victim_lru_info(hpb); in ufshpb_add_region()
1104 dev_warn(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_add_region()
1106 hpb->is_hcm ? "" : "error"); in ufshpb_add_region()
1111 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_add_region()
1116 if (hpb->is_hcm) { in ufshpb_add_region()
1117 spin_unlock_irqrestore(&hpb->rgn_state_lock, in ufshpb_add_region()
1119 ret = ufshpb_issue_umap_single_req(hpb, in ufshpb_add_region()
1121 spin_lock_irqsave(&hpb->rgn_state_lock, in ufshpb_add_region()
1127 __ufshpb_evict_region(hpb, victim_rgn); in ufshpb_add_region()
1139 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_add_region()
1147 static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index) in ufshpb_submit_region_inactive() argument
1156 spin_lock(&hpb->rsp_list_lock); in ufshpb_submit_region_inactive()
1157 ufshpb_update_inactive_info(hpb, region_index); in ufshpb_submit_region_inactive()
1158 spin_unlock(&hpb->rsp_list_lock); in ufshpb_submit_region_inactive()
1160 rgn = hpb->rgn_tbl + region_index; in ufshpb_submit_region_inactive()
1165 spin_lock(&hpb->rgn_state_lock); in ufshpb_submit_region_inactive()
1173 spin_unlock(&hpb->rgn_state_lock); in ufshpb_submit_region_inactive()
1176 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, in ufshpb_rsp_req_region_update() argument
1196 rgn = hpb->rgn_tbl + rgn_i; in ufshpb_rsp_req_region_update()
1197 if (hpb->is_hcm && in ufshpb_rsp_req_region_update()
1208 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_req_region_update()
1211 spin_lock(&hpb->rsp_list_lock); in ufshpb_rsp_req_region_update()
1212 ufshpb_update_active_info(hpb, rgn_i, srgn_i); in ufshpb_rsp_req_region_update()
1213 spin_unlock(&hpb->rsp_list_lock); in ufshpb_rsp_req_region_update()
1218 spin_lock(&hpb->rgn_state_lock); in ufshpb_rsp_req_region_update()
1221 spin_unlock(&hpb->rgn_state_lock); in ufshpb_rsp_req_region_update()
1224 if (hpb->is_hcm) { in ufshpb_rsp_req_region_update()
1234 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i); in ufshpb_rsp_req_region_update()
1235 ufshpb_submit_region_inactive(hpb, rgn_i); in ufshpb_rsp_req_region_update()
1239 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n", in ufshpb_rsp_req_region_update()
1242 if (ufshpb_get_state(hpb) == HPB_PRESENT) in ufshpb_rsp_req_region_update()
1243 queue_work(ufshpb_wq, &hpb->map_work); in ufshpb_rsp_req_region_update()
1249 static void ufshpb_set_regions_update(struct ufshpb_lu *hpb) in ufshpb_set_regions_update() argument
1251 struct victim_select_info *lru_info = &hpb->lru_info; in ufshpb_set_regions_update()
1255 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_set_regions_update()
1260 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_set_regions_update()
1266 struct ufshpb_lu *hpb; in ufshpb_dev_reset_handler() local
1269 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_dev_reset_handler()
1270 if (!hpb) in ufshpb_dev_reset_handler()
1273 if (hpb->is_hcm) { in ufshpb_dev_reset_handler()
1279 ufshpb_set_regions_update(hpb); in ufshpb_dev_reset_handler()
1287 struct victim_select_info *lru_info = &hpb->lru_info; in ufshpb_dev_reset_handler()
1291 ufshpb_submit_region_inactive(hpb, rgn->rgn_idx); in ufshpb_dev_reset_handler()
1293 if (ufshpb_get_state(hpb) == HPB_PRESENT) in ufshpb_dev_reset_handler()
1294 queue_work(ufshpb_wq, &hpb->map_work); in ufshpb_dev_reset_handler()
1305 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device); in ufshpb_rsp_upiu() local
1321 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_rsp_upiu()
1323 if (!hpb) in ufshpb_rsp_upiu()
1326 if (rsp_field->lun == hpb->lun) { in ufshpb_rsp_upiu()
1336 if (!hpb) in ufshpb_rsp_upiu()
1339 if (ufshpb_get_state(hpb) == HPB_INIT) in ufshpb_rsp_upiu()
1342 if ((ufshpb_get_state(hpb) != HPB_PRESENT) && in ufshpb_rsp_upiu()
1343 (ufshpb_get_state(hpb) != HPB_SUSPEND)) { in ufshpb_rsp_upiu()
1344 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_upiu()
1355 hpb->stats.rcmd_noti_cnt++; in ufshpb_rsp_upiu()
1360 dev_warn(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_upiu()
1363 ufshpb_rsp_req_region_update(hpb, rsp_field); in ufshpb_rsp_upiu()
1366 dev_warn(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_upiu()
1372 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_upiu()
1379 static void ufshpb_add_active_list(struct ufshpb_lu *hpb, in ufshpb_add_active_list() argument
1387 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn); in ufshpb_add_active_list()
1391 list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn); in ufshpb_add_active_list()
1394 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb, in ufshpb_add_pending_evict_list() argument
1411 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb) in ufshpb_run_active_subregion_list() argument
1418 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1419 while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn, in ufshpb_run_active_subregion_list()
1422 if (ufshpb_get_state(hpb) == HPB_SUSPEND) in ufshpb_run_active_subregion_list()
1426 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1428 rgn = hpb->rgn_tbl + srgn->rgn_idx; in ufshpb_run_active_subregion_list()
1429 ret = ufshpb_add_region(hpb, rgn); in ufshpb_run_active_subregion_list()
1433 ret = ufshpb_issue_map_req(hpb, rgn, srgn); in ufshpb_run_active_subregion_list()
1435 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_run_active_subregion_list()
1440 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1442 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1446 dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n", in ufshpb_run_active_subregion_list()
1448 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1449 ufshpb_add_active_list(hpb, rgn, srgn); in ufshpb_run_active_subregion_list()
1450 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1453 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb) in ufshpb_run_inactive_region_list() argument
1460 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1461 while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn, in ufshpb_run_inactive_region_list()
1464 if (ufshpb_get_state(hpb) == HPB_SUSPEND) in ufshpb_run_inactive_region_list()
1468 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1470 ret = ufshpb_evict_region(hpb, rgn); in ufshpb_run_inactive_region_list()
1472 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1473 ufshpb_add_pending_evict_list(hpb, rgn, &pending_list); in ufshpb_run_inactive_region_list()
1474 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1477 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1480 list_splice(&pending_list, &hpb->lh_inact_rgn); in ufshpb_run_inactive_region_list()
1481 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1486 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, in ufshpb_normalization_work_handler() local
1489 u8 factor = hpb->params.normalization_factor; in ufshpb_normalization_work_handler()
1491 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { in ufshpb_normalization_work_handler()
1492 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_normalization_work_handler()
1497 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) { in ufshpb_normalization_work_handler()
1509 spin_lock(&hpb->rsp_list_lock); in ufshpb_normalization_work_handler()
1510 ufshpb_update_inactive_info(hpb, rgn->rgn_idx); in ufshpb_normalization_work_handler()
1511 spin_unlock(&hpb->rsp_list_lock); in ufshpb_normalization_work_handler()
1517 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work); in ufshpb_map_work_handler() local
1519 if (ufshpb_get_state(hpb) != HPB_PRESENT) { in ufshpb_map_work_handler()
1520 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_map_work_handler()
1525 ufshpb_run_inactive_region_list(hpb); in ufshpb_map_work_handler()
1526 ufshpb_run_active_subregion_list(hpb); in ufshpb_map_work_handler()
1534 struct ufshpb_lu *hpb, in ufshpb_init_pinned_active_region() argument
1542 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); in ufshpb_init_pinned_active_region()
1551 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); in ufshpb_init_pinned_active_region()
1560 ufshpb_put_map_ctx(hpb, srgn->mctx); in ufshpb_init_pinned_active_region()
1565 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb, in ufshpb_init_subregion_tbl() argument
1579 if (unlikely(last && hpb->last_srgn_entries)) in ufshpb_init_subregion_tbl()
1583 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb, in ufshpb_alloc_subregion_tbl() argument
1596 struct ufshpb_lu *hpb, in ufshpb_lu_parameter_init() argument
1604 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH; in ufshpb_lu_parameter_init()
1606 hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd; in ufshpb_lu_parameter_init()
1608 hpb->lu_pinned_start = hpb_lu_info->pinned_start; in ufshpb_lu_parameter_init()
1609 hpb->lu_pinned_end = hpb_lu_info->num_pinned ? in ufshpb_lu_parameter_init()
1612 hpb->lru_info.max_lru_active_cnt = in ufshpb_lu_parameter_init()
1618 hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size) in ufshpb_lu_parameter_init()
1624 hpb->entries_per_rgn_shift = ilog2(entries_per_rgn); in ufshpb_lu_parameter_init()
1625 hpb->entries_per_rgn_mask = entries_per_rgn - 1; in ufshpb_lu_parameter_init()
1627 hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE; in ufshpb_lu_parameter_init()
1628 hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn); in ufshpb_lu_parameter_init()
1629 hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1; in ufshpb_lu_parameter_init()
1632 do_div(tmp, hpb->srgn_mem_size); in ufshpb_lu_parameter_init()
1633 hpb->srgns_per_rgn = (int)tmp; in ufshpb_lu_parameter_init()
1635 hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, in ufshpb_lu_parameter_init()
1637 hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, in ufshpb_lu_parameter_init()
1638 (hpb->srgn_mem_size / HPB_ENTRY_SIZE)); in ufshpb_lu_parameter_init()
1639 hpb->last_srgn_entries = hpb_lu_info->num_blocks in ufshpb_lu_parameter_init()
1640 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE); in ufshpb_lu_parameter_init()
1642 hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE); in ufshpb_lu_parameter_init()
1645 hpb->is_hcm = true; in ufshpb_lu_parameter_init()
1648 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) in ufshpb_alloc_region_tbl() argument
1654 rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region), in ufshpb_alloc_region_tbl()
1659 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { in ufshpb_alloc_region_tbl()
1660 int srgn_cnt = hpb->srgns_per_rgn; in ufshpb_alloc_region_tbl()
1672 if (rgn_idx == hpb->rgns_per_lu - 1) { in ufshpb_alloc_region_tbl()
1673 srgn_cnt = ((hpb->srgns_per_lu - 1) % in ufshpb_alloc_region_tbl()
1674 hpb->srgns_per_rgn) + 1; in ufshpb_alloc_region_tbl()
1678 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt); in ufshpb_alloc_region_tbl()
1681 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn); in ufshpb_alloc_region_tbl()
1683 if (ufshpb_is_pinned_region(hpb, rgn_idx)) { in ufshpb_alloc_region_tbl()
1684 ret = ufshpb_init_pinned_active_region(hba, hpb, rgn); in ufshpb_alloc_region_tbl()
1692 rgn->hpb = hpb; in ufshpb_alloc_region_tbl()
1695 hpb->rgn_tbl = rgn_table; in ufshpb_alloc_region_tbl()
1707 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb, in ufshpb_destroy_subregion_tbl() argument
1716 ufshpb_put_map_ctx(hpb, srgn->mctx); in ufshpb_destroy_subregion_tbl()
1720 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb) in ufshpb_destroy_region_tbl() argument
1724 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { in ufshpb_destroy_region_tbl()
1727 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_destroy_region_tbl()
1731 ufshpb_destroy_subregion_tbl(hpb, rgn); in ufshpb_destroy_region_tbl()
1737 kvfree(hpb->rgn_tbl); in ufshpb_destroy_region_tbl()
1746 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
1748 if (!hpb) \
1751 return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \
1786 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
1788 if (!hpb) \
1791 return sysfs_emit(buf, "%d\n", hpb->params.__name); \
1800 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in requeue_timeout_ms_store() local
1803 if (!hpb) in requeue_timeout_ms_store()
1812 hpb->params.requeue_timeout_ms = val; in requeue_timeout_ms_store()
1824 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in activation_thld_store() local
1827 if (!hpb) in activation_thld_store()
1830 if (!hpb->is_hcm) in activation_thld_store()
1839 hpb->params.activation_thld = val; in activation_thld_store()
1851 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in normalization_factor_store() local
1854 if (!hpb) in normalization_factor_store()
1857 if (!hpb->is_hcm) in normalization_factor_store()
1863 if (val <= 0 || val > ilog2(hpb->entries_per_srgn)) in normalization_factor_store()
1866 hpb->params.normalization_factor = val; in normalization_factor_store()
1878 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in eviction_thld_enter_store() local
1881 if (!hpb) in eviction_thld_enter_store()
1884 if (!hpb->is_hcm) in eviction_thld_enter_store()
1890 if (val <= hpb->params.eviction_thld_exit) in eviction_thld_enter_store()
1893 hpb->params.eviction_thld_enter = val; in eviction_thld_enter_store()
1905 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in eviction_thld_exit_store() local
1908 if (!hpb) in eviction_thld_exit_store()
1911 if (!hpb->is_hcm) in eviction_thld_exit_store()
1917 if (val <= hpb->params.activation_thld) in eviction_thld_exit_store()
1920 hpb->params.eviction_thld_exit = val; in eviction_thld_exit_store()
1932 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in read_timeout_ms_store() local
1935 if (!hpb) in read_timeout_ms_store()
1938 if (!hpb->is_hcm) in read_timeout_ms_store()
1945 if (val < hpb->params.timeout_polling_interval_ms * 2) in read_timeout_ms_store()
1948 hpb->params.read_timeout_ms = val; in read_timeout_ms_store()
1960 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in read_timeout_expiries_store() local
1963 if (!hpb) in read_timeout_expiries_store()
1966 if (!hpb->is_hcm) in read_timeout_expiries_store()
1975 hpb->params.read_timeout_expiries = val; in read_timeout_expiries_store()
1988 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in timeout_polling_interval_ms_store() local
1991 if (!hpb) in timeout_polling_interval_ms_store()
1994 if (!hpb->is_hcm) in timeout_polling_interval_ms_store()
2001 if (val <= 0 || val > hpb->params.read_timeout_ms / 2) in timeout_polling_interval_ms_store()
2004 hpb->params.timeout_polling_interval_ms = val; in timeout_polling_interval_ms_store()
2016 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in inflight_map_req_store() local
2019 if (!hpb) in inflight_map_req_store()
2022 if (!hpb->is_hcm) in inflight_map_req_store()
2028 if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1) in inflight_map_req_store()
2031 hpb->params.inflight_map_req = val; in inflight_map_req_store()
2037 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb) in ufshpb_hcm_param_init() argument
2039 hpb->params.activation_thld = ACTIVATION_THRESHOLD; in ufshpb_hcm_param_init()
2040 hpb->params.normalization_factor = 1; in ufshpb_hcm_param_init()
2041 hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5); in ufshpb_hcm_param_init()
2042 hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4); in ufshpb_hcm_param_init()
2043 hpb->params.read_timeout_ms = READ_TO_MS; in ufshpb_hcm_param_init()
2044 hpb->params.read_timeout_expiries = READ_TO_EXPIRIES; in ufshpb_hcm_param_init()
2045 hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS; in ufshpb_hcm_param_init()
2046 hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT; in ufshpb_hcm_param_init()
2067 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb) in ufshpb_pre_req_mempool_init() argument
2070 int qd = hpb->sdev_ufs_lu->queue_depth / 2; in ufshpb_pre_req_mempool_init()
2073 INIT_LIST_HEAD(&hpb->lh_pre_req_free); in ufshpb_pre_req_mempool_init()
2075 hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL); in ufshpb_pre_req_mempool_init()
2076 hpb->throttle_pre_req = qd; in ufshpb_pre_req_mempool_init()
2077 hpb->num_inflight_pre_req = 0; in ufshpb_pre_req_mempool_init()
2079 if (!hpb->pre_req) in ufshpb_pre_req_mempool_init()
2083 pre_req = hpb->pre_req + i; in ufshpb_pre_req_mempool_init()
2097 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); in ufshpb_pre_req_mempool_init()
2102 list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) { in ufshpb_pre_req_mempool_init()
2108 kfree(hpb->pre_req); in ufshpb_pre_req_mempool_init()
2112 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb) in ufshpb_pre_req_mempool_destroy() argument
2117 for (i = 0; i < hpb->throttle_pre_req; i++) { in ufshpb_pre_req_mempool_destroy()
2118 pre_req = hpb->pre_req + i; in ufshpb_pre_req_mempool_destroy()
2119 bio_put(hpb->pre_req[i].bio); in ufshpb_pre_req_mempool_destroy()
2121 __free_page(hpb->pre_req[i].wb.m_page); in ufshpb_pre_req_mempool_destroy()
2125 kfree(hpb->pre_req); in ufshpb_pre_req_mempool_destroy()
2128 static void ufshpb_stat_init(struct ufshpb_lu *hpb) in ufshpb_stat_init() argument
2130 hpb->stats.hit_cnt = 0; in ufshpb_stat_init()
2131 hpb->stats.miss_cnt = 0; in ufshpb_stat_init()
2132 hpb->stats.rcmd_noti_cnt = 0; in ufshpb_stat_init()
2133 hpb->stats.rcmd_active_cnt = 0; in ufshpb_stat_init()
2134 hpb->stats.rcmd_inactive_cnt = 0; in ufshpb_stat_init()
2135 hpb->stats.map_req_cnt = 0; in ufshpb_stat_init()
2136 hpb->stats.umap_req_cnt = 0; in ufshpb_stat_init()
2139 static void ufshpb_param_init(struct ufshpb_lu *hpb) in ufshpb_param_init() argument
2141 hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS; in ufshpb_param_init()
2142 if (hpb->is_hcm) in ufshpb_param_init()
2143 ufshpb_hcm_param_init(hpb); in ufshpb_param_init()
2146 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) in ufshpb_lu_hpb_init() argument
2150 spin_lock_init(&hpb->rgn_state_lock); in ufshpb_lu_hpb_init()
2151 spin_lock_init(&hpb->rsp_list_lock); in ufshpb_lu_hpb_init()
2152 spin_lock_init(&hpb->param_lock); in ufshpb_lu_hpb_init()
2154 INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn); in ufshpb_lu_hpb_init()
2155 INIT_LIST_HEAD(&hpb->lh_act_srgn); in ufshpb_lu_hpb_init()
2156 INIT_LIST_HEAD(&hpb->lh_inact_rgn); in ufshpb_lu_hpb_init()
2157 INIT_LIST_HEAD(&hpb->list_hpb_lu); in ufshpb_lu_hpb_init()
2159 INIT_WORK(&hpb->map_work, ufshpb_map_work_handler); in ufshpb_lu_hpb_init()
2160 if (hpb->is_hcm) { in ufshpb_lu_hpb_init()
2161 INIT_WORK(&hpb->ufshpb_normalization_work, in ufshpb_lu_hpb_init()
2163 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work, in ufshpb_lu_hpb_init()
2167 hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache", in ufshpb_lu_hpb_init()
2169 if (!hpb->map_req_cache) { in ufshpb_lu_hpb_init()
2171 hpb->lun); in ufshpb_lu_hpb_init()
2175 hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache", in ufshpb_lu_hpb_init()
2176 sizeof(struct page *) * hpb->pages_per_srgn, in ufshpb_lu_hpb_init()
2178 if (!hpb->m_page_cache) { in ufshpb_lu_hpb_init()
2180 hpb->lun); in ufshpb_lu_hpb_init()
2185 ret = ufshpb_pre_req_mempool_init(hpb); in ufshpb_lu_hpb_init()
2188 hpb->lun); in ufshpb_lu_hpb_init()
2192 ret = ufshpb_alloc_region_tbl(hba, hpb); in ufshpb_lu_hpb_init()
2196 ufshpb_stat_init(hpb); in ufshpb_lu_hpb_init()
2197 ufshpb_param_init(hpb); in ufshpb_lu_hpb_init()
2199 if (hpb->is_hcm) { in ufshpb_lu_hpb_init()
2202 poll = hpb->params.timeout_polling_interval_ms; in ufshpb_lu_hpb_init()
2203 schedule_delayed_work(&hpb->ufshpb_read_to_work, in ufshpb_lu_hpb_init()
2210 ufshpb_pre_req_mempool_destroy(hpb); in ufshpb_lu_hpb_init()
2212 kmem_cache_destroy(hpb->m_page_cache); in ufshpb_lu_hpb_init()
2214 kmem_cache_destroy(hpb->map_req_cache); in ufshpb_lu_hpb_init()
2223 struct ufshpb_lu *hpb; in ufshpb_alloc_hpb_lu() local
2226 hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL); in ufshpb_alloc_hpb_lu()
2227 if (!hpb) in ufshpb_alloc_hpb_lu()
2230 hpb->lun = sdev->lun; in ufshpb_alloc_hpb_lu()
2231 hpb->sdev_ufs_lu = sdev; in ufshpb_alloc_hpb_lu()
2233 ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info); in ufshpb_alloc_hpb_lu()
2235 ret = ufshpb_lu_hpb_init(hba, hpb); in ufshpb_alloc_hpb_lu()
2241 sdev->hostdata = hpb; in ufshpb_alloc_hpb_lu()
2242 return hpb; in ufshpb_alloc_hpb_lu()
2245 kfree(hpb); in ufshpb_alloc_hpb_lu()
2249 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb) in ufshpb_discard_rsp_lists() argument
2260 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_discard_rsp_lists()
2261 list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn, in ufshpb_discard_rsp_lists()
2265 list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn, in ufshpb_discard_rsp_lists()
2268 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_discard_rsp_lists()
2271 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) in ufshpb_cancel_jobs() argument
2273 if (hpb->is_hcm) { in ufshpb_cancel_jobs()
2274 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work); in ufshpb_cancel_jobs()
2275 cancel_work_sync(&hpb->ufshpb_normalization_work); in ufshpb_cancel_jobs()
2277 cancel_work_sync(&hpb->map_work); in ufshpb_cancel_jobs()
2325 struct ufshpb_lu *hpb; in ufshpb_toggle_state() local
2329 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_toggle_state()
2331 if (!hpb || ufshpb_get_state(hpb) != src) in ufshpb_toggle_state()
2333 ufshpb_set_state(hpb, dest); in ufshpb_toggle_state()
2336 ufshpb_cancel_jobs(hpb); in ufshpb_toggle_state()
2337 ufshpb_discard_rsp_lists(hpb); in ufshpb_toggle_state()
2344 struct ufshpb_lu *hpb; in ufshpb_suspend() local
2348 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_suspend()
2349 if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT) in ufshpb_suspend()
2352 ufshpb_set_state(hpb, HPB_SUSPEND); in ufshpb_suspend()
2353 ufshpb_cancel_jobs(hpb); in ufshpb_suspend()
2359 struct ufshpb_lu *hpb; in ufshpb_resume() local
2363 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_resume()
2364 if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND) in ufshpb_resume()
2367 ufshpb_set_state(hpb, HPB_PRESENT); in ufshpb_resume()
2368 ufshpb_kick_map_work(hpb); in ufshpb_resume()
2369 if (hpb->is_hcm) { in ufshpb_resume()
2370 unsigned int poll = hpb->params.timeout_polling_interval_ms; in ufshpb_resume()
2372 schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll)); in ufshpb_resume()
2426 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in ufshpb_destroy_lu() local
2428 if (!hpb) in ufshpb_destroy_lu()
2431 ufshpb_set_state(hpb, HPB_FAILED); in ufshpb_destroy_lu()
2433 sdev = hpb->sdev_ufs_lu; in ufshpb_destroy_lu()
2436 ufshpb_cancel_jobs(hpb); in ufshpb_destroy_lu()
2438 ufshpb_pre_req_mempool_destroy(hpb); in ufshpb_destroy_lu()
2439 ufshpb_destroy_region_tbl(hpb); in ufshpb_destroy_lu()
2441 kmem_cache_destroy(hpb->map_req_cache); in ufshpb_destroy_lu()
2442 kmem_cache_destroy(hpb->m_page_cache); in ufshpb_destroy_lu()
2444 list_del_init(&hpb->list_hpb_lu); in ufshpb_destroy_lu()
2446 kfree(hpb); in ufshpb_destroy_lu()
2452 struct ufshpb_lu *hpb; in ufshpb_hpb_lu_prepared() local
2470 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_hpb_lu_prepared()
2471 if (!hpb) in ufshpb_hpb_lu_prepared()
2475 ufshpb_set_state(hpb, HPB_PRESENT); in ufshpb_hpb_lu_prepared()
2476 if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0) in ufshpb_hpb_lu_prepared()
2477 queue_work(ufshpb_wq, &hpb->map_work); in ufshpb_hpb_lu_prepared()
2479 dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun); in ufshpb_hpb_lu_prepared()
2490 struct ufshpb_lu *hpb; in ufshpb_init_hpb_lu() local
2502 hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev, in ufshpb_init_hpb_lu()
2504 if (!hpb) in ufshpb_init_hpb_lu()
2508 hpb->srgns_per_rgn * hpb->pages_per_srgn; in ufshpb_init_hpb_lu()
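
The ufshpb_get_pos_from_lpn() entries above show how the driver splits a logical page number (LPN) into a region index, a subregion index, and an offset within the subregion using the precomputed shift/mask fields set up in ufshpb_lu_parameter_init(). The standalone sketch below reproduces that arithmetic outside the kernel; the struct, the helper name, and the concrete geometry (4096 entries per region, 512 per subregion) are illustrative assumptions, not values taken from the driver.

/*
 * Minimal sketch of the LPN -> (region, subregion, offset) decomposition.
 * The fields mirror hpb->entries_per_rgn_shift/_mask and
 * hpb->entries_per_srgn_shift/_mask from the listing above; the geometry
 * chosen in main() is hypothetical.
 */
#include <stdio.h>

struct hpb_geometry {
	unsigned long entries_per_rgn_shift;
	unsigned long entries_per_rgn_mask;
	unsigned long entries_per_srgn_shift;
	unsigned long entries_per_srgn_mask;
};

static void hpb_pos_from_lpn(const struct hpb_geometry *g, unsigned long lpn,
			     int *rgn_idx, int *srgn_idx, int *offset)
{
	unsigned long rgn_offset;

	*rgn_idx = lpn >> g->entries_per_rgn_shift;           /* which region         */
	rgn_offset = lpn & g->entries_per_rgn_mask;           /* LPN within the region */
	*srgn_idx = rgn_offset >> g->entries_per_srgn_shift;  /* which subregion       */
	*offset = rgn_offset & g->entries_per_srgn_mask;      /* entry in subregion    */
}

int main(void)
{
	/* Hypothetical geometry: 4096 entries per region, 512 per subregion. */
	struct hpb_geometry g = {
		.entries_per_rgn_shift = 12, .entries_per_rgn_mask = 4096 - 1,
		.entries_per_srgn_shift = 9, .entries_per_srgn_mask = 512 - 1,
	};
	int rgn, srgn, off;

	hpb_pos_from_lpn(&g, 0x12345, &rgn, &srgn, &off);
	printf("lpn 0x12345 -> rgn %d srgn %d offset %d\n", rgn, srgn, off);
	return 0;
}

Because the entries-per-region and entries-per-subregion counts are powers of two (the driver derives the shifts with ilog2() and the masks as size - 1), the per-I/O lookup reduces to shifts and masks rather than divisions.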