1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *	Yongmyung Lee <ymhungry.lee@samsung.com>
9  *	Jinyoung Choi <j-young.choi@samsung.com>
10  */
11 
12 #include <asm/unaligned.h>
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/module.h>
16 #include <scsi/scsi_cmnd.h>
17 
18 #include "ufshcd-priv.h"
19 #include "ufshpb.h"
20 #include "../../scsi/sd.h"
21 
22 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
23 #define READ_TO_MS 1000
24 #define READ_TO_EXPIRIES 100
25 #define POLLING_INTERVAL_MS 200
26 #define THROTTLE_MAP_REQ_DEFAULT 1
27 
28 /* memory management */
29 static struct kmem_cache *ufshpb_mctx_cache;
30 static mempool_t *ufshpb_mctx_pool;
31 static mempool_t *ufshpb_page_pool;
32 /* A 2MB cache can hold the PPN entries covering a 1GB LBA range. */
33 static unsigned int ufshpb_host_map_kbytes = 2048;
34 static int tot_active_srgn_pages;
35 
36 static struct workqueue_struct *ufshpb_wq;
37 
38 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
39 				      int srgn_idx);
40 
41 bool ufshpb_is_allowed(struct ufs_hba *hba)
42 {
43 	return !(hba->ufshpb_dev.hpb_disabled);
44 }
45 
46 /* HPB version 1.0 is considered the legacy version. */
47 bool ufshpb_is_legacy(struct ufs_hba *hba)
48 {
49 	return hba->ufshpb_dev.is_legacy;
50 }
51 
52 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
53 {
54 	return sdev->hostdata;
55 }
56 
57 static int ufshpb_get_state(struct ufshpb_lu *hpb)
58 {
59 	return atomic_read(&hpb->hpb_state);
60 }
61 
62 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
63 {
64 	atomic_set(&hpb->hpb_state, state);
65 }
66 
67 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
68 				struct ufshpb_subregion *srgn)
69 {
70 	return rgn->rgn_state != HPB_RGN_INACTIVE &&
71 		srgn->srgn_state == HPB_SRGN_VALID;
72 }
73 
74 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
75 {
76 	return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
77 }
78 
79 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
80 {
81 	return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
82 	       op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
83 }
84 
85 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
86 {
87 	return transfer_len <= hpb->pre_req_max_tr_len;
88 }
89 
90 static bool ufshpb_is_general_lun(int lun)
91 {
92 	return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
93 }
94 
95 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
96 {
97 	return hpb->lu_pinned_end != PINNED_NOT_SET &&
98 	       rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end;
99 }
100 
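/*
 * Queue map_work if the HPB LU is present and there are subregions waiting
 * to be activated or regions waiting to be inactivated.
 */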
101 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
102 {
103 	bool ret = false;
104 	unsigned long flags;
105 
106 	if (ufshpb_get_state(hpb) != HPB_PRESENT)
107 		return;
108 
109 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
110 	if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
111 		ret = true;
112 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
113 
114 	if (ret)
115 		queue_work(ufshpb_wq, &hpb->map_work);
116 }
117 
118 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
119 				    struct ufshcd_lrb *lrbp,
120 				    struct utp_hpb_rsp *rsp_field)
121 {
122 	/* Check HPB_UPDATE_ALERT */
123 	if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
124 	      UPIU_HEADER_DWORD(0, 2, 0, 0)))
125 		return false;
126 
127 	if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
128 	    rsp_field->desc_type != DEV_DES_TYPE ||
129 	    rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
130 	    rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
131 	    rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
132 	    rsp_field->hpb_op == HPB_RSP_NONE ||
133 	    (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
134 	     !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
135 		return false;
136 
137 	if (!ufshpb_is_general_lun(rsp_field->lun)) {
138 		dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
139 			 lrbp->lun);
140 		return false;
141 	}
142 
143 	return true;
144 }
145 
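/*
 * Walk every subregion covered by @cnt entries starting at @srgn_offset in
 * (@rgn_idx, @srgn_idx). For writes/discards (@set_dirty) the covered
 * entries are marked dirty; for reads in host control mode the per-region
 * read counters are updated and subregions that reach the activation
 * threshold are queued for activation.
 */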
146 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
147 			       int srgn_offset, int cnt, bool set_dirty)
148 {
149 	struct ufshpb_region *rgn;
150 	struct ufshpb_subregion *srgn, *prev_srgn = NULL;
151 	int set_bit_len;
152 	int bitmap_len;
153 	unsigned long flags;
154 
155 next_srgn:
156 	rgn = hpb->rgn_tbl + rgn_idx;
157 	srgn = rgn->srgn_tbl + srgn_idx;
158 
159 	if (likely(!srgn->is_last))
160 		bitmap_len = hpb->entries_per_srgn;
161 	else
162 		bitmap_len = hpb->last_srgn_entries;
163 
164 	if ((srgn_offset + cnt) > bitmap_len)
165 		set_bit_len = bitmap_len - srgn_offset;
166 	else
167 		set_bit_len = cnt;
168 
169 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
170 	if (rgn->rgn_state != HPB_RGN_INACTIVE) {
171 		if (set_dirty) {
172 			if (srgn->srgn_state == HPB_SRGN_VALID)
173 				bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
174 					   set_bit_len);
175 		} else if (hpb->is_hcm) {
176 			/* rewind the read timeout for LRU regions */
177 			rgn->read_timeout = ktime_add_ms(ktime_get(),
178 					rgn->hpb->params.read_timeout_ms);
179 			rgn->read_timeout_expiries =
180 				rgn->hpb->params.read_timeout_expiries;
181 		}
182 	}
183 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
184 
185 	if (hpb->is_hcm && prev_srgn != srgn) {
186 		bool activate = false;
187 
188 		spin_lock(&rgn->rgn_lock);
189 		if (set_dirty) {
190 			rgn->reads -= srgn->reads;
191 			srgn->reads = 0;
192 			set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
193 		} else {
194 			srgn->reads++;
195 			rgn->reads++;
196 			if (srgn->reads == hpb->params.activation_thld)
197 				activate = true;
198 		}
199 		spin_unlock(&rgn->rgn_lock);
200 
201 		if (activate ||
202 		    test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
203 			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
204 			ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
205 			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
206 			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
207 				"activate region %d-%d\n", rgn_idx, srgn_idx);
208 		}
209 
210 		prev_srgn = srgn;
211 	}
212 
213 	srgn_offset = 0;
214 	if (++srgn_idx == hpb->srgns_per_rgn) {
215 		srgn_idx = 0;
216 		rgn_idx++;
217 	}
218 
219 	cnt -= set_bit_len;
220 	if (cnt > 0)
221 		goto next_srgn;
222 }
223 
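/*
 * Return true if any entry in the given LPN range cannot be served from the
 * HPB cache, i.e. its subregion is not valid, has no mctx, or has a dirty
 * PPN entry.
 */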
224 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
225 				  int srgn_idx, int srgn_offset, int cnt)
226 {
227 	struct ufshpb_region *rgn;
228 	struct ufshpb_subregion *srgn;
229 	int bitmap_len;
230 	int bit_len;
231 
232 next_srgn:
233 	rgn = hpb->rgn_tbl + rgn_idx;
234 	srgn = rgn->srgn_tbl + srgn_idx;
235 
236 	if (likely(!srgn->is_last))
237 		bitmap_len = hpb->entries_per_srgn;
238 	else
239 		bitmap_len = hpb->last_srgn_entries;
240 
241 	if (!ufshpb_is_valid_srgn(rgn, srgn))
242 		return true;
243 
244 	/*
245 	 * If the region state is active, mctx must be allocated.
246 	 * In this case, check whether the region has been evicted or
247 	 * the mctx allocation failed.
248 	 */
249 	if (unlikely(!srgn->mctx)) {
250 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
251 			"no mctx in region %d subregion %d.\n",
252 			srgn->rgn_idx, srgn->srgn_idx);
253 		return true;
254 	}
255 
256 	if ((srgn_offset + cnt) > bitmap_len)
257 		bit_len = bitmap_len - srgn_offset;
258 	else
259 		bit_len = cnt;
260 
261 	if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
262 			  srgn_offset) < bit_len + srgn_offset)
263 		return true;
264 
265 	srgn_offset = 0;
266 	if (++srgn_idx == hpb->srgns_per_rgn) {
267 		srgn_idx = 0;
268 		rgn_idx++;
269 	}
270 
271 	cnt -= bit_len;
272 	if (cnt > 0)
273 		goto next_srgn;
274 
275 	return false;
276 }
277 
278 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
279 {
280 	return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
281 }
282 
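/*
 * Copy up to @len PPN entries, starting at entry @pos, from the mctx pages
 * into @ppn_buf. Returns the number of entries copied, which may be less
 * than @len when the range crosses a page boundary, or -ENOMEM if the page
 * is missing.
 */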
283 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
284 				     struct ufshpb_map_ctx *mctx, int pos,
285 				     int len, __be64 *ppn_buf)
286 {
287 	struct page *page;
288 	int index, offset;
289 	int copied;
290 
291 	index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
292 	offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
293 
294 	if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
295 		copied = len;
296 	else
297 		copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
298 
299 	page = mctx->m_page[index];
300 	if (unlikely(!page)) {
301 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
302 			"error. cannot find page in mctx\n");
303 		return -ENOMEM;
304 	}
305 
306 	memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
307 	       copied * HPB_ENTRY_SIZE);
308 
309 	return copied;
310 }
311 
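/* Decompose an LPN into its region index, subregion index and offset. */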
312 static void
313 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
314 			int *srgn_idx, int *offset)
315 {
316 	int rgn_offset;
317 
318 	*rgn_idx = lpn >> hpb->entries_per_rgn_shift;
319 	rgn_offset = lpn & hpb->entries_per_rgn_mask;
320 	*srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
321 	*offset = rgn_offset & hpb->entries_per_srgn_mask;
322 }
323 
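/*
 * Build the UFSHPB_READ CDB: opcode in byte 0, the 8-byte PPN (big-endian)
 * in bytes 6-13 and the transfer length in byte 14.
 */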
324 static void
325 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
326 			    __be64 ppn, u8 transfer_len)
327 {
328 	unsigned char *cdb = lrbp->cmd->cmnd;
329 	__be64 ppn_tmp = ppn;
330 	cdb[0] = UFSHPB_READ;
331 
332 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
333 		ppn_tmp = (__force __be64)swab64((__force u64)ppn);
334 
335 	/* ppn value is stored as big-endian in the host memory */
336 	memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
337 	cdb[14] = transfer_len;
338 	cdb[15] = 0;
339 
340 	lrbp->cmd->cmd_len = UFS_CDB_SIZE;
341 }
342 
343 /*
344  * This function sets up an HPB READ command using host-side L2P map data.
345  */
346 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
347 {
348 	struct ufshpb_lu *hpb;
349 	struct ufshpb_region *rgn;
350 	struct ufshpb_subregion *srgn;
351 	struct scsi_cmnd *cmd = lrbp->cmd;
352 	u32 lpn;
353 	__be64 ppn;
354 	unsigned long flags;
355 	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
356 	int err = 0;
357 
358 	hpb = ufshpb_get_hpb_data(cmd->device);
359 	if (!hpb)
360 		return -ENODEV;
361 
362 	if (ufshpb_get_state(hpb) == HPB_INIT)
363 		return -ENODEV;
364 
365 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
366 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
367 			   "%s: ufshpb state is not PRESENT", __func__);
368 		return -ENODEV;
369 	}
370 
371 	if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
372 	    (!ufshpb_is_write_or_discard(cmd) &&
373 	     !ufshpb_is_read_cmd(cmd)))
374 		return 0;
375 
376 	transfer_len = sectors_to_logical(cmd->device,
377 					  blk_rq_sectors(scsi_cmd_to_rq(cmd)));
378 	if (unlikely(!transfer_len))
379 		return 0;
380 
381 	lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
382 	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
383 	rgn = hpb->rgn_tbl + rgn_idx;
384 	srgn = rgn->srgn_tbl + srgn_idx;
385 
386 	/* If the command is a WRITE or DISCARD, mark the covered entries dirty */
387 	if (ufshpb_is_write_or_discard(cmd)) {
388 		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
389 				   transfer_len, true);
390 		return 0;
391 	}
392 
393 	if (!ufshpb_is_supported_chunk(hpb, transfer_len))
394 		return 0;
395 
396 	if (hpb->is_hcm) {
397 		/*
398 		 * in host control mode, reads are the main source for
399 		 * activation trials.
400 		 */
401 		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
402 				   transfer_len, false);
403 
404 		/* keep those counters normalized */
405 		if (rgn->reads > hpb->entries_per_srgn)
406 			schedule_work(&hpb->ufshpb_normalization_work);
407 	}
408 
409 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
410 	if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
411 				   transfer_len)) {
412 		hpb->stats.miss_cnt++;
413 		spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
414 		return 0;
415 	}
416 
417 	err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
418 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
419 	if (unlikely(err < 0)) {
420 		/*
421 		 * In this case, the region state is active,
422 		 * but the ppn table is not allocated.
423 		 * The ppn table must always be allocated while the
424 		 * region is in the active state.
425 		 */
426 		dev_err(hba->dev, "get ppn failed. err %d\n", err);
427 		return err;
428 	}
429 
430 	ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
431 
432 	hpb->stats.hit_cnt++;
433 	return 0;
434 }
435 
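/*
 * Allocate an ufshpb_req together with its block layer request. When
 * @atomic is false, a busy queue is retried a few times before giving up.
 */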
436 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, int rgn_idx,
437 					 enum req_op op, bool atomic)
438 {
439 	struct ufshpb_req *rq;
440 	struct request *req;
441 	int retries = HPB_MAP_REQ_RETRIES;
442 
443 	rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
444 	if (!rq)
445 		return NULL;
446 
447 retry:
448 	req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, op,
449 			      BLK_MQ_REQ_NOWAIT);
450 
451 	if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
452 		usleep_range(3000, 3100);
453 		goto retry;
454 	}
455 
456 	if (IS_ERR(req))
457 		goto free_rq;
458 
459 	rq->hpb = hpb;
460 	rq->req = req;
461 	rq->rb.rgn_idx = rgn_idx;
462 
463 	return rq;
464 
465 free_rq:
466 	kmem_cache_free(hpb->map_req_cache, rq);
467 	return NULL;
468 }
469 
470 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
471 {
472 	blk_mq_free_request(rq->req);
473 	kmem_cache_free(hpb->map_req_cache, rq);
474 }
475 
476 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
477 					     struct ufshpb_subregion *srgn)
478 {
479 	struct ufshpb_req *map_req;
480 	struct bio *bio;
481 	unsigned long flags;
482 
483 	if (hpb->is_hcm &&
484 	    hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
485 		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
486 			 "map_req throttle. inflight %d throttle %d",
487 			 hpb->num_inflight_map_req,
488 			 hpb->params.inflight_map_req);
489 		return NULL;
490 	}
491 
492 	map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
493 	if (!map_req)
494 		return NULL;
495 
496 	bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
497 	if (!bio) {
498 		ufshpb_put_req(hpb, map_req);
499 		return NULL;
500 	}
501 
502 	map_req->bio = bio;
503 
504 	map_req->rb.srgn_idx = srgn->srgn_idx;
505 	map_req->rb.mctx = srgn->mctx;
506 
507 	spin_lock_irqsave(&hpb->param_lock, flags);
508 	hpb->num_inflight_map_req++;
509 	spin_unlock_irqrestore(&hpb->param_lock, flags);
510 
511 	return map_req;
512 }
513 
514 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
515 			       struct ufshpb_req *map_req)
516 {
517 	unsigned long flags;
518 
519 	bio_put(map_req->bio);
520 	ufshpb_put_req(hpb, map_req);
521 
522 	spin_lock_irqsave(&hpb->param_lock, flags);
523 	hpb->num_inflight_map_req--;
524 	spin_unlock_irqrestore(&hpb->param_lock, flags);
525 }
526 
527 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
528 				     struct ufshpb_subregion *srgn)
529 {
530 	struct ufshpb_region *rgn;
531 	u32 num_entries = hpb->entries_per_srgn;
532 
533 	if (!srgn->mctx) {
534 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
535 			"no mctx in region %d subregion %d.\n",
536 			srgn->rgn_idx, srgn->srgn_idx);
537 		return -1;
538 	}
539 
540 	if (unlikely(srgn->is_last))
541 		num_entries = hpb->last_srgn_entries;
542 
543 	bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
544 
545 	rgn = hpb->rgn_tbl + srgn->rgn_idx;
546 	clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
547 
548 	return 0;
549 }
550 
551 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
552 				      int srgn_idx)
553 {
554 	struct ufshpb_region *rgn;
555 	struct ufshpb_subregion *srgn;
556 
557 	rgn = hpb->rgn_tbl + rgn_idx;
558 	srgn = rgn->srgn_tbl + srgn_idx;
559 
560 	list_del_init(&rgn->list_inact_rgn);
561 
562 	if (list_empty(&srgn->list_act_srgn))
563 		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
564 
565 	hpb->stats.rcmd_active_cnt++;
566 }
567 
568 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
569 {
570 	struct ufshpb_region *rgn;
571 	struct ufshpb_subregion *srgn;
572 	int srgn_idx;
573 
574 	rgn = hpb->rgn_tbl + rgn_idx;
575 
576 	for_each_sub_region(rgn, srgn_idx, srgn)
577 		list_del_init(&srgn->list_act_srgn);
578 
579 	if (list_empty(&rgn->list_inact_rgn))
580 		list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
581 
582 	hpb->stats.rcmd_inactive_cnt++;
583 }
584 
585 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
586 				      struct ufshpb_subregion *srgn)
587 {
588 	struct ufshpb_region *rgn;
589 
590 	/*
591 	 * If the subregion has no mctx after the I/O for
592 	 * HPB_READ_BUFFER has completed, the region to which the
593 	 * subregion belongs was evicted.
594 	 * A region must not be evicted while its I/O is in progress.
595 	 */
596 	if (!srgn->mctx) {
597 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
598 			"no mctx in region %d subregion %d.\n",
599 			srgn->rgn_idx, srgn->srgn_idx);
600 		srgn->srgn_state = HPB_SRGN_INVALID;
601 		return;
602 	}
603 
604 	rgn = hpb->rgn_tbl + srgn->rgn_idx;
605 
606 	if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
607 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
608 			"region %d subregion %d evicted\n",
609 			srgn->rgn_idx, srgn->srgn_idx);
610 		srgn->srgn_state = HPB_SRGN_INVALID;
611 		return;
612 	}
613 	srgn->srgn_state = HPB_SRGN_VALID;
614 }
615 
616 static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
617 						   blk_status_t error)
618 {
619 	struct ufshpb_req *umap_req = req->end_io_data;
620 
621 	ufshpb_put_req(umap_req->hpb, umap_req);
622 	return RQ_END_IO_NONE;
623 }
624 
625 static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
626 						  blk_status_t error)
627 {
628 	struct ufshpb_req *map_req = req->end_io_data;
629 	struct ufshpb_lu *hpb = map_req->hpb;
630 	struct ufshpb_subregion *srgn;
631 	unsigned long flags;
632 
633 	srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
634 		map_req->rb.srgn_idx;
635 
636 	ufshpb_clear_dirty_bitmap(hpb, srgn);
637 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
638 	ufshpb_activate_subregion(hpb, srgn);
639 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
640 
641 	ufshpb_put_map_req(map_req->hpb, map_req);
642 	return RQ_END_IO_NONE;
643 }
644 
645 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
646 {
647 	cdb[0] = UFSHPB_WRITE_BUFFER;
648 	cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
649 			  UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
650 	if (rgn)
651 		put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
652 	cdb[9] = 0x00;
653 }
654 
655 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
656 				    int srgn_idx, int srgn_mem_size)
657 {
658 	cdb[0] = UFSHPB_READ_BUFFER;
659 	cdb[1] = UFSHPB_READ_BUFFER_ID;
660 
661 	put_unaligned_be16(rgn_idx, &cdb[2]);
662 	put_unaligned_be16(srgn_idx, &cdb[4]);
663 	put_unaligned_be24(srgn_mem_size, &cdb[6]);
664 
665 	cdb[9] = 0x00;
666 }
667 
668 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
669 				   struct ufshpb_req *umap_req,
670 				   struct ufshpb_region *rgn)
671 {
672 	struct request *req = umap_req->req;
673 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
674 
675 	req->timeout = 0;
676 	req->end_io_data = umap_req;
677 	req->end_io = ufshpb_umap_req_compl_fn;
678 
679 	ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
680 	scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
681 
682 	blk_execute_rq_nowait(req, true);
683 
684 	hpb->stats.umap_req_cnt++;
685 }
686 
687 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
688 				  struct ufshpb_req *map_req, bool last)
689 {
690 	struct request_queue *q;
691 	struct request *req;
692 	struct scsi_cmnd *scmd;
693 	int mem_size = hpb->srgn_mem_size;
694 	int ret = 0;
695 	int i;
696 
697 	q = hpb->sdev_ufs_lu->request_queue;
698 	for (i = 0; i < hpb->pages_per_srgn; i++) {
699 		ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
700 				      PAGE_SIZE, 0);
701 		if (ret != PAGE_SIZE) {
702 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
703 				   "bio_add_pc_page fail %d - %d\n",
704 				   map_req->rb.rgn_idx, map_req->rb.srgn_idx);
705 			return ret;
706 		}
707 	}
708 
709 	req = map_req->req;
710 
711 	blk_rq_append_bio(req, map_req->bio);
712 
713 	req->end_io_data = map_req;
714 	req->end_io = ufshpb_map_req_compl_fn;
715 
716 	if (unlikely(last))
717 		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
718 
719 	scmd = blk_mq_rq_to_pdu(req);
720 	ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
721 				map_req->rb.srgn_idx, mem_size);
722 	scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
723 
724 	blk_execute_rq_nowait(req, true);
725 
726 	hpb->stats.map_req_cnt++;
727 	return 0;
728 }
729 
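/*
 * Allocate a map context for one subregion: its dirty bitmap plus the pages
 * (from the shared page pool) that will hold the subregion's L2P entries.
 */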
730 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
731 						 bool last)
732 {
733 	struct ufshpb_map_ctx *mctx;
734 	u32 num_entries = hpb->entries_per_srgn;
735 	int i, j;
736 
737 	mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
738 	if (!mctx)
739 		return NULL;
740 
741 	mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
742 	if (!mctx->m_page)
743 		goto release_mctx;
744 
745 	if (unlikely(last))
746 		num_entries = hpb->last_srgn_entries;
747 
748 	mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
749 	if (!mctx->ppn_dirty)
750 		goto release_m_page;
751 
752 	for (i = 0; i < hpb->pages_per_srgn; i++) {
753 		mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
754 		if (!mctx->m_page[i]) {
755 			for (j = 0; j < i; j++)
756 				mempool_free(mctx->m_page[j], ufshpb_page_pool);
757 			goto release_ppn_dirty;
758 		}
759 		clear_page(page_address(mctx->m_page[i]));
760 	}
761 
762 	return mctx;
763 
764 release_ppn_dirty:
765 	bitmap_free(mctx->ppn_dirty);
766 release_m_page:
767 	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
768 release_mctx:
769 	mempool_free(mctx, ufshpb_mctx_pool);
770 	return NULL;
771 }
772 
773 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
774 			       struct ufshpb_map_ctx *mctx)
775 {
776 	int i;
777 
778 	for (i = 0; i < hpb->pages_per_srgn; i++)
779 		mempool_free(mctx->m_page[i], ufshpb_page_pool);
780 
781 	bitmap_free(mctx->ppn_dirty);
782 	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
783 	mempool_free(mctx, ufshpb_mctx_pool);
784 }
785 
786 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
787 					  struct ufshpb_region *rgn)
788 {
789 	struct ufshpb_subregion *srgn;
790 	int srgn_idx;
791 
792 	for_each_sub_region(rgn, srgn_idx, srgn)
793 		if (srgn->srgn_state == HPB_SRGN_ISSUED)
794 			return -EPERM;
795 
796 	return 0;
797 }
798 
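/*
 * Host control mode read-timeout work: LRU regions whose timeout expired are
 * either granted another timeout window or, if dirty or out of expiries,
 * queued for inactivation before map_work is kicked.
 */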
799 static void ufshpb_read_to_handler(struct work_struct *work)
800 {
801 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
802 					     ufshpb_read_to_work.work);
803 	struct victim_select_info *lru_info = &hpb->lru_info;
804 	struct ufshpb_region *rgn, *next_rgn;
805 	unsigned long flags;
806 	unsigned int poll;
807 	LIST_HEAD(expired_list);
808 
809 	if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
810 		return;
811 
812 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
813 
814 	list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
815 				 list_lru_rgn) {
816 		bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
817 
818 		if (timedout) {
819 			rgn->read_timeout_expiries--;
820 			if (is_rgn_dirty(rgn) ||
821 			    rgn->read_timeout_expiries == 0)
822 				list_add(&rgn->list_expired_rgn, &expired_list);
823 			else
824 				rgn->read_timeout = ktime_add_ms(ktime_get(),
825 						hpb->params.read_timeout_ms);
826 		}
827 	}
828 
829 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
830 
831 	list_for_each_entry_safe(rgn, next_rgn, &expired_list,
832 				 list_expired_rgn) {
833 		list_del_init(&rgn->list_expired_rgn);
834 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
835 		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
836 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
837 	}
838 
839 	ufshpb_kick_map_work(hpb);
840 
841 	clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
842 
843 	poll = hpb->params.timeout_polling_interval_ms;
844 	schedule_delayed_work(&hpb->ufshpb_read_to_work,
845 			      msecs_to_jiffies(poll));
846 }
847 
848 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
849 				struct ufshpb_region *rgn)
850 {
851 	rgn->rgn_state = HPB_RGN_ACTIVE;
852 	list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
853 	atomic_inc(&lru_info->active_cnt);
854 	if (rgn->hpb->is_hcm) {
855 		rgn->read_timeout =
856 			ktime_add_ms(ktime_get(),
857 				     rgn->hpb->params.read_timeout_ms);
858 		rgn->read_timeout_expiries =
859 			rgn->hpb->params.read_timeout_expiries;
860 	}
861 }
862 
863 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
864 				struct ufshpb_region *rgn)
865 {
866 	list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
867 }
868 
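/*
 * Pick an eviction victim: the least recently used region that has no map
 * request in flight and, in host control mode, whose read count is at or
 * below the eviction exit threshold.
 */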
869 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
870 {
871 	struct victim_select_info *lru_info = &hpb->lru_info;
872 	struct ufshpb_region *rgn, *victim_rgn = NULL;
873 
874 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
875 		if (ufshpb_check_srgns_issue_state(hpb, rgn))
876 			continue;
877 
878 		/*
879 		 * in host control mode, verify that the exiting region
880 		 * has fewer reads than the eviction exit threshold
881 		 */
882 		if (hpb->is_hcm &&
883 		    rgn->reads > hpb->params.eviction_thld_exit)
884 			continue;
885 
886 		victim_rgn = rgn;
887 		break;
888 	}
889 
890 	if (!victim_rgn)
891 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
892 			"%s: no region allocated\n",
893 			__func__);
894 
895 	return victim_rgn;
896 }
897 
898 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
899 				    struct ufshpb_region *rgn)
900 {
901 	list_del_init(&rgn->list_lru_rgn);
902 	rgn->rgn_state = HPB_RGN_INACTIVE;
903 	atomic_dec(&lru_info->active_cnt);
904 }
905 
906 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
907 					  struct ufshpb_subregion *srgn)
908 {
909 	if (srgn->srgn_state != HPB_SRGN_UNUSED) {
910 		ufshpb_put_map_ctx(hpb, srgn->mctx);
911 		srgn->srgn_state = HPB_SRGN_UNUSED;
912 		srgn->mctx = NULL;
913 	}
914 }
915 
916 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
917 				 struct ufshpb_region *rgn,
918 				 bool atomic)
919 {
920 	struct ufshpb_req *umap_req;
921 	int rgn_idx = rgn ? rgn->rgn_idx : 0;
922 
923 	umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
924 	if (!umap_req)
925 		return -ENOMEM;
926 
927 	ufshpb_execute_umap_req(hpb, umap_req, rgn);
928 
929 	return 0;
930 }
931 
932 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
933 					struct ufshpb_region *rgn)
934 {
935 	return ufshpb_issue_umap_req(hpb, rgn, true);
936 }
937 
938 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
939 				 struct ufshpb_region *rgn)
940 {
941 	struct victim_select_info *lru_info;
942 	struct ufshpb_subregion *srgn;
943 	int srgn_idx;
944 
945 	lru_info = &hpb->lru_info;
946 
947 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
948 
949 	ufshpb_cleanup_lru_info(lru_info, rgn);
950 
951 	for_each_sub_region(rgn, srgn_idx, srgn)
952 		ufshpb_purge_active_subregion(hpb, srgn);
953 }
954 
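/*
 * Evict @rgn from the active LRU list unless it is pinned or has a map
 * request in flight. In host control mode an unmap (HPB WRITE BUFFER)
 * request is sent to the device first.
 */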
955 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
956 {
957 	unsigned long flags;
958 	int ret = 0;
959 
960 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
961 	if (rgn->rgn_state == HPB_RGN_PINNED) {
962 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
963 			 "pinned region cannot drop-out. region %d\n",
964 			 rgn->rgn_idx);
965 		goto out;
966 	}
967 
968 	if (!list_empty(&rgn->list_lru_rgn)) {
969 		if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
970 			ret = -EBUSY;
971 			goto out;
972 		}
973 
974 		if (hpb->is_hcm) {
975 			spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
976 			ret = ufshpb_issue_umap_single_req(hpb, rgn);
977 			spin_lock_irqsave(&hpb->rgn_state_lock, flags);
978 			if (ret)
979 				goto out;
980 		}
981 
982 		__ufshpb_evict_region(hpb, rgn);
983 	}
984 out:
985 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
986 	return ret;
987 }
988 
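/*
 * Issue an HPB READ BUFFER request to load the L2P map of @srgn, allocating
 * its map context on first use and marking the subregion ISSUED while the
 * request is in flight.
 */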
989 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
990 				struct ufshpb_region *rgn,
991 				struct ufshpb_subregion *srgn)
992 {
993 	struct ufshpb_req *map_req;
994 	unsigned long flags;
995 	int ret;
996 	int err = -EAGAIN;
997 	bool alloc_required = false;
998 	enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
999 
1000 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1001 
1002 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1003 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1004 			   "%s: ufshpb state is not PRESENT\n", __func__);
1005 		goto unlock_out;
1006 	}
1007 
1008 	if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1009 	    (srgn->srgn_state == HPB_SRGN_INVALID)) {
1010 		err = 0;
1011 		goto unlock_out;
1012 	}
1013 
1014 	if (srgn->srgn_state == HPB_SRGN_UNUSED)
1015 		alloc_required = true;
1016 
1017 	/*
1018 	 * If the subregion is already in the ISSUED state, a specific
1019 	 * event (e.g. GC or wear-leveling) occurred in the device and
1020 	 * an HPB response requesting a map load was received.
1021 	 * In this case, after the outstanding HPB_READ_BUFFER finishes,
1022 	 * the next HPB_READ_BUFFER is issued again to obtain the latest
1023 	 * map data.
1024 	 */
1025 	if (srgn->srgn_state == HPB_SRGN_ISSUED)
1026 		goto unlock_out;
1027 
1028 	srgn->srgn_state = HPB_SRGN_ISSUED;
1029 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1030 
1031 	if (alloc_required) {
1032 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1033 		if (!srgn->mctx) {
1034 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1035 			    "get map_ctx failed. region %d - %d\n",
1036 			    rgn->rgn_idx, srgn->srgn_idx);
1037 			state = HPB_SRGN_UNUSED;
1038 			goto change_srgn_state;
1039 		}
1040 	}
1041 
1042 	map_req = ufshpb_get_map_req(hpb, srgn);
1043 	if (!map_req)
1044 		goto change_srgn_state;
1045 
1046 
1047 	ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1048 	if (ret) {
1049 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1050 			   "%s: issue map_req failed: %d, region %d - %d\n",
1051 			   __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1052 		goto free_map_req;
1053 	}
1054 	return 0;
1055 
1056 free_map_req:
1057 	ufshpb_put_map_req(hpb, map_req);
1058 change_srgn_state:
1059 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1060 	srgn->srgn_state = state;
1061 unlock_out:
1062 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1063 	return err;
1064 }
1065 
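/*
 * Activate @rgn: if it is already on the LRU list just mark it most
 * recently used; otherwise add it, evicting the current LRU victim first
 * when the active region limit has been reached.
 */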
1066 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1067 {
1068 	struct ufshpb_region *victim_rgn = NULL;
1069 	struct victim_select_info *lru_info = &hpb->lru_info;
1070 	unsigned long flags;
1071 	int ret = 0;
1072 
1073 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1074 	/*
1075 	 * If the region is already on the LRU list, its state is
1076 	 * already active, so just move it to the most-recently-used
1077 	 * end of the list.
1078 	 */
1079 	if (!list_empty(&rgn->list_lru_rgn)) {
1080 		ufshpb_hit_lru_info(lru_info, rgn);
1081 		goto out;
1082 	}
1083 
1084 	if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1085 		if (atomic_read(&lru_info->active_cnt) ==
1086 		    lru_info->max_lru_active_cnt) {
1087 			/*
1088 			 * If the maximum number of active regions
1089 			 * is exceeded, evict the least recently used region.
1090 			 * This case may occur when the device responds
1091 			 * to the eviction information late.
1092 			 * It is okay to evict the least recently used region,
1093 			 * because the device can detect the eviction once
1094 			 * the host stops issuing HPB_READ for it.
1095 			 *
1096 			 * in host control mode, verify that the entering
1097 			 * region has enough reads
1098 			 */
1099 			if (hpb->is_hcm &&
1100 			    rgn->reads < hpb->params.eviction_thld_enter) {
1101 				ret = -EACCES;
1102 				goto out;
1103 			}
1104 
1105 			victim_rgn = ufshpb_victim_lru_info(hpb);
1106 			if (!victim_rgn) {
1107 				dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1108 				    "cannot get victim region %s\n",
1109 				    hpb->is_hcm ? "" : "error");
1110 				ret = -ENOMEM;
1111 				goto out;
1112 			}
1113 
1114 			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1115 				"LRU full (%d), choose victim %d\n",
1116 				atomic_read(&lru_info->active_cnt),
1117 				victim_rgn->rgn_idx);
1118 
1119 			if (hpb->is_hcm) {
1120 				spin_unlock_irqrestore(&hpb->rgn_state_lock,
1121 						       flags);
1122 				ret = ufshpb_issue_umap_single_req(hpb,
1123 								victim_rgn);
1124 				spin_lock_irqsave(&hpb->rgn_state_lock,
1125 						  flags);
1126 				if (ret)
1127 					goto out;
1128 			}
1129 
1130 			__ufshpb_evict_region(hpb, victim_rgn);
1131 		}
1132 
1133 		/*
1134 		 * When a region is added to the lru_info list, it is
1135 		 * guaranteed that all of its subregions have been
1136 		 * assigned an mctx. If that fails, mctx allocation is
1137 		 * retried without adding the region to the lru_info list.
1138 		 */
1139 		ufshpb_add_lru_info(lru_info, rgn);
1140 	}
1141 out:
1142 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1143 	return ret;
1144 }
1145 /**
1146  * ufshpb_submit_region_inactive() - submit a region to be inactivated later
1147  * @hpb: per-LU HPB instance
1148  * @region_index: the index of the region that will be inactivated later
1149  */
1150 static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index)
1151 {
1152 	int subregion_index;
1153 	struct ufshpb_region *rgn;
1154 	struct ufshpb_subregion *srgn;
1155 
1156 	/*
1157 	 * Remove this region from active region list and add it to inactive list
1158 	 */
1159 	spin_lock(&hpb->rsp_list_lock);
1160 	ufshpb_update_inactive_info(hpb, region_index);
1161 	spin_unlock(&hpb->rsp_list_lock);
1162 
1163 	rgn = hpb->rgn_tbl + region_index;
1164 
1165 	/*
1166 	 * Set the subregion state to HPB_SRGN_INVALID so that no HPB READ is issued on this subregion
1167 	 */
1168 	spin_lock(&hpb->rgn_state_lock);
1169 	if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1170 		for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) {
1171 			srgn = rgn->srgn_tbl + subregion_index;
1172 			if (srgn->srgn_state == HPB_SRGN_VALID)
1173 				srgn->srgn_state = HPB_SRGN_INVALID;
1174 		}
1175 	}
1176 	spin_unlock(&hpb->rgn_state_lock);
1177 }
1178 
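/*
 * Handle an HPB_RSP_REQ_REGION_UPDATE recommendation: queue the recommended
 * subregions for activation and, in device control mode, the recommended
 * regions for inactivation, then kick map_work.
 */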
1179 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1180 					 struct utp_hpb_rsp *rsp_field)
1181 {
1182 	struct ufshpb_region *rgn;
1183 	struct ufshpb_subregion *srgn;
1184 	int i, rgn_i, srgn_i;
1185 
1186 	BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1187 	/*
1188 	 * If the active region and the inactive region are the same,
1189 	 * we will inactivate this region.
1190 	 * The device can detect that the region was inactivated and
1191 	 * will respond with the proper active region information.
1192 	 */
1193 	for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1194 		rgn_i =
1195 			be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1196 		srgn_i =
1197 			be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1198 
1199 		rgn = hpb->rgn_tbl + rgn_i;
1200 		if (hpb->is_hcm &&
1201 		    (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1202 			/*
1203 			 * in host control mode, subregion activation
1204 			 * recommendations are only honored for active regions.
1205 			 * Also, ignore recommendations for dirty regions - the
1206 			 * host makes decisions concerning those by itself
1207 			 */
1208 			continue;
1209 		}
1210 
1211 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1212 			"activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1213 
1214 		spin_lock(&hpb->rsp_list_lock);
1215 		ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1216 		spin_unlock(&hpb->rsp_list_lock);
1217 
1218 		srgn = rgn->srgn_tbl + srgn_i;
1219 
1220 		/* blocking HPB_READ */
1221 		spin_lock(&hpb->rgn_state_lock);
1222 		if (srgn->srgn_state == HPB_SRGN_VALID)
1223 			srgn->srgn_state = HPB_SRGN_INVALID;
1224 		spin_unlock(&hpb->rgn_state_lock);
1225 	}
1226 
1227 	if (hpb->is_hcm) {
1228 		/*
1229 		 * in host control mode the device is not allowed to inactivate
1230 		 * regions
1231 		 */
1232 		goto out;
1233 	}
1234 
1235 	for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1236 		rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1237 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i);
1238 		ufshpb_submit_region_inactive(hpb, rgn_i);
1239 	}
1240 
1241 out:
1242 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1243 		rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1244 
1245 	if (ufshpb_get_state(hpb) == HPB_PRESENT)
1246 		queue_work(ufshpb_wq, &hpb->map_work);
1247 }
1248 
1249 /*
1250  * Set the flags of all active regions to RGN_FLAG_UPDATE so that the host side reloads their L2P entries later.
1251  */
1252 static void ufshpb_set_regions_update(struct ufshpb_lu *hpb)
1253 {
1254 	struct victim_select_info *lru_info = &hpb->lru_info;
1255 	struct ufshpb_region *rgn;
1256 	unsigned long flags;
1257 
1258 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1259 
1260 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1261 		set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1262 
1263 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1264 }
1265 
1266 static void ufshpb_dev_reset_handler(struct ufs_hba *hba)
1267 {
1268 	struct scsi_device *sdev;
1269 	struct ufshpb_lu *hpb;
1270 
1271 	__shost_for_each_device(sdev, hba->host) {
1272 		hpb = ufshpb_get_hpb_data(sdev);
1273 		if (!hpb)
1274 			continue;
1275 
1276 		if (hpb->is_hcm) {
1277 			/*
1278 			 * In HPB host control mode, if the device powered up and lost its HPB
1279 			 * information, set the region flag to RGN_FLAG_UPDATE so that the host
1280 			 * reloads its L2P entries (i.e. reactivates the region in the UFS device).
1281 			 */
1282 			ufshpb_set_regions_update(hpb);
1283 		} else {
1284 			/*
1285 			 * In HPB device control mode, receiving 02h:HPB Operation in the UPIU
1286 			 * response means the device recommends that the host inactivate all
1287 			 * active regions. Here we add all active regions to the inactive
1288 			 * list; they will be inactivated later in ufshpb_map_work_handler().
1289 			 */
1290 			struct victim_select_info *lru_info = &hpb->lru_info;
1291 			struct ufshpb_region *rgn;
1292 
1293 			list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1294 				ufshpb_submit_region_inactive(hpb, rgn->rgn_idx);
1295 
1296 			if (ufshpb_get_state(hpb) == HPB_PRESENT)
1297 				queue_work(ufshpb_wq, &hpb->map_work);
1298 		}
1299 	}
1300 }
1301 
1302 /*
1303  * This function parses the recommended active subregion information in the
1304  * sense data field of a response UPIU with SAM_STAT_GOOD status.
1305  */
1306 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1307 {
1308 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1309 	struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1310 	int data_seg_len;
1311 
1312 	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1313 		& MASK_RSP_UPIU_DATA_SEG_LEN;
1314 
1315 	/* If data segment length is zero, rsp_field is not valid */
1316 	if (!data_seg_len)
1317 		return;
1318 
1319 	if (unlikely(lrbp->lun != rsp_field->lun)) {
1320 		struct scsi_device *sdev;
1321 		bool found = false;
1322 
1323 		__shost_for_each_device(sdev, hba->host) {
1324 			hpb = ufshpb_get_hpb_data(sdev);
1325 
1326 			if (!hpb)
1327 				continue;
1328 
1329 			if (rsp_field->lun == hpb->lun) {
1330 				found = true;
1331 				break;
1332 			}
1333 		}
1334 
1335 		if (!found)
1336 			return;
1337 	}
1338 
1339 	if (!hpb)
1340 		return;
1341 
1342 	if (ufshpb_get_state(hpb) == HPB_INIT)
1343 		return;
1344 
1345 	if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1346 	    (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1347 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1348 			   "%s: ufshpb state is not PRESENT/SUSPEND\n",
1349 			   __func__);
1350 		return;
1351 	}
1352 
1353 	BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1354 
1355 	if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1356 		return;
1357 
1358 	hpb->stats.rcmd_noti_cnt++;
1359 
1360 	switch (rsp_field->hpb_op) {
1361 	case HPB_RSP_REQ_REGION_UPDATE:
1362 		if (data_seg_len != DEV_DATA_SEG_LEN)
1363 			dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1364 				 "%s: data segment length mismatch.\n",
1365 				 __func__);
1366 		ufshpb_rsp_req_region_update(hpb, rsp_field);
1367 		break;
1368 	case HPB_RSP_DEV_RESET:
1369 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1370 			 "UFS device lost HPB information during PM.\n");
1371 		ufshpb_dev_reset_handler(hba);
1372 
1373 		break;
1374 	default:
1375 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1376 			   "hpb_op is not available: %d\n",
1377 			   rsp_field->hpb_op);
1378 		break;
1379 	}
1380 }
1381 
1382 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1383 				   struct ufshpb_region *rgn,
1384 				   struct ufshpb_subregion *srgn)
1385 {
1386 	if (!list_empty(&rgn->list_inact_rgn))
1387 		return;
1388 
1389 	if (!list_empty(&srgn->list_act_srgn)) {
1390 		list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1391 		return;
1392 	}
1393 
1394 	list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1395 }
1396 
1397 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1398 					  struct ufshpb_region *rgn,
1399 					  struct list_head *pending_list)
1400 {
1401 	struct ufshpb_subregion *srgn;
1402 	int srgn_idx;
1403 
1404 	if (!list_empty(&rgn->list_inact_rgn))
1405 		return;
1406 
1407 	for_each_sub_region(rgn, srgn_idx, srgn)
1408 		if (!list_empty(&srgn->list_act_srgn))
1409 			return;
1410 
1411 	list_add_tail(&rgn->list_inact_rgn, pending_list);
1412 }
1413 
1414 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1415 {
1416 	struct ufshpb_region *rgn;
1417 	struct ufshpb_subregion *srgn;
1418 	unsigned long flags;
1419 	int ret = 0;
1420 
1421 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1422 	while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1423 						struct ufshpb_subregion,
1424 						list_act_srgn))) {
1425 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1426 			break;
1427 
1428 		list_del_init(&srgn->list_act_srgn);
1429 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1430 
1431 		rgn = hpb->rgn_tbl + srgn->rgn_idx;
1432 		ret = ufshpb_add_region(hpb, rgn);
1433 		if (ret)
1434 			goto active_failed;
1435 
1436 		ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1437 		if (ret) {
1438 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1439 			    "issue map_req failed. ret %d, region %d - %d\n",
1440 			    ret, rgn->rgn_idx, srgn->srgn_idx);
1441 			goto active_failed;
1442 		}
1443 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1444 	}
1445 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1446 	return;
1447 
1448 active_failed:
1449 	dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1450 		   rgn->rgn_idx, srgn->srgn_idx);
1451 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1452 	ufshpb_add_active_list(hpb, rgn, srgn);
1453 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1454 }
1455 
1456 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1457 {
1458 	struct ufshpb_region *rgn;
1459 	unsigned long flags;
1460 	int ret;
1461 	LIST_HEAD(pending_list);
1462 
1463 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1464 	while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1465 					       struct ufshpb_region,
1466 					       list_inact_rgn))) {
1467 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1468 			break;
1469 
1470 		list_del_init(&rgn->list_inact_rgn);
1471 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1472 
1473 		ret = ufshpb_evict_region(hpb, rgn);
1474 		if (ret) {
1475 			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1476 			ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1477 			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1478 		}
1479 
1480 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1481 	}
1482 
1483 	list_splice(&pending_list, &hpb->lh_inact_rgn);
1484 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1485 }
1486 
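/*
 * Host control mode: decay the per-subregion read counters by the
 * normalization factor and queue active regions left with no reads for
 * inactivation.
 */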
1487 static void ufshpb_normalization_work_handler(struct work_struct *work)
1488 {
1489 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1490 					     ufshpb_normalization_work);
1491 	int rgn_idx;
1492 	u8 factor = hpb->params.normalization_factor;
1493 
1494 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1495 		struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1496 		int srgn_idx;
1497 
1498 		spin_lock(&rgn->rgn_lock);
1499 		rgn->reads = 0;
1500 		for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1501 			struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1502 
1503 			srgn->reads >>= factor;
1504 			rgn->reads += srgn->reads;
1505 		}
1506 		spin_unlock(&rgn->rgn_lock);
1507 
1508 		if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1509 			continue;
1510 
1511 		/* if region is active but has no reads - inactivate it */
1512 		spin_lock(&hpb->rsp_list_lock);
1513 		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1514 		spin_unlock(&hpb->rsp_list_lock);
1515 	}
1516 }
1517 
1518 static void ufshpb_map_work_handler(struct work_struct *work)
1519 {
1520 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1521 
1522 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1523 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1524 			   "%s: ufshpb state is not PRESENT\n", __func__);
1525 		return;
1526 	}
1527 
1528 	ufshpb_run_inactive_region_list(hpb);
1529 	ufshpb_run_active_subregion_list(hpb);
1530 }
1531 
1532 /*
1533  * This function does not need to hold any locks (rgn_state_lock,
1534  * rsp_list_lock, etc.) because it is only called during initialization.
1535  */
1536 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1537 					    struct ufshpb_lu *hpb,
1538 					    struct ufshpb_region *rgn)
1539 {
1540 	struct ufshpb_subregion *srgn;
1541 	int srgn_idx, i;
1542 	int err = 0;
1543 
1544 	for_each_sub_region(rgn, srgn_idx, srgn) {
1545 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1546 		srgn->srgn_state = HPB_SRGN_INVALID;
1547 		if (!srgn->mctx) {
1548 			err = -ENOMEM;
1549 			dev_err(hba->dev,
1550 				"alloc mctx for pinned region failed\n");
1551 			goto release;
1552 		}
1553 
1554 		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1555 	}
1556 
1557 	rgn->rgn_state = HPB_RGN_PINNED;
1558 	return 0;
1559 
1560 release:
1561 	for (i = 0; i < srgn_idx; i++) {
1562 		srgn = rgn->srgn_tbl + i;
1563 		ufshpb_put_map_ctx(hpb, srgn->mctx);
1564 	}
1565 	return err;
1566 }
1567 
1568 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1569 				      struct ufshpb_region *rgn, bool last)
1570 {
1571 	int srgn_idx;
1572 	struct ufshpb_subregion *srgn;
1573 
1574 	for_each_sub_region(rgn, srgn_idx, srgn) {
1575 		INIT_LIST_HEAD(&srgn->list_act_srgn);
1576 
1577 		srgn->rgn_idx = rgn->rgn_idx;
1578 		srgn->srgn_idx = srgn_idx;
1579 		srgn->srgn_state = HPB_SRGN_UNUSED;
1580 	}
1581 
1582 	if (unlikely(last && hpb->last_srgn_entries))
1583 		srgn->is_last = true;
1584 }
1585 
1586 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1587 				      struct ufshpb_region *rgn, int srgn_cnt)
1588 {
1589 	rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1590 				 GFP_KERNEL);
1591 	if (!rgn->srgn_tbl)
1592 		return -ENOMEM;
1593 
1594 	rgn->srgn_cnt = srgn_cnt;
1595 	return 0;
1596 }
1597 
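/*
 * Derive the per-LU HPB geometry (region/subregion sizes, entry counts,
 * shifts and masks) from the device and unit descriptor information.
 */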
1598 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1599 				     struct ufshpb_lu *hpb,
1600 				     struct ufshpb_dev_info *hpb_dev_info,
1601 				     struct ufshpb_lu_info *hpb_lu_info)
1602 {
1603 	u32 entries_per_rgn;
1604 	u64 rgn_mem_size, tmp;
1605 
1606 	if (ufshpb_is_legacy(hba))
1607 		hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1608 	else
1609 		hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
1610 
1611 	hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1612 	hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1613 		(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1614 		: PINNED_NOT_SET;
1615 	hpb->lru_info.max_lru_active_cnt =
1616 		hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1617 
1618 	rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1619 			* HPB_ENTRY_SIZE;
1620 	do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1621 	hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1622 		* HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1623 
1624 	tmp = rgn_mem_size;
1625 	do_div(tmp, HPB_ENTRY_SIZE);
1626 	entries_per_rgn = (u32)tmp;
1627 	hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1628 	hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1629 
1630 	hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1631 	hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1632 	hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1633 
1634 	tmp = rgn_mem_size;
1635 	do_div(tmp, hpb->srgn_mem_size);
1636 	hpb->srgns_per_rgn = (int)tmp;
1637 
1638 	hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1639 				entries_per_rgn);
1640 	hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1641 				(hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1642 	hpb->last_srgn_entries = hpb_lu_info->num_blocks
1643 				 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1644 
1645 	hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1646 
1647 	if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1648 		hpb->is_hcm = true;
1649 }
1650 
1651 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1652 {
1653 	struct ufshpb_region *rgn_table, *rgn;
1654 	int rgn_idx, i;
1655 	int ret = 0;
1656 
1657 	rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1658 			    GFP_KERNEL);
1659 	if (!rgn_table)
1660 		return -ENOMEM;
1661 
1662 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1663 		int srgn_cnt = hpb->srgns_per_rgn;
1664 		bool last_srgn = false;
1665 
1666 		rgn = rgn_table + rgn_idx;
1667 		rgn->rgn_idx = rgn_idx;
1668 
1669 		spin_lock_init(&rgn->rgn_lock);
1670 
1671 		INIT_LIST_HEAD(&rgn->list_inact_rgn);
1672 		INIT_LIST_HEAD(&rgn->list_lru_rgn);
1673 		INIT_LIST_HEAD(&rgn->list_expired_rgn);
1674 
1675 		if (rgn_idx == hpb->rgns_per_lu - 1) {
1676 			srgn_cnt = ((hpb->srgns_per_lu - 1) %
1677 				    hpb->srgns_per_rgn) + 1;
1678 			last_srgn = true;
1679 		}
1680 
1681 		ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1682 		if (ret)
1683 			goto release_srgn_table;
1684 		ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1685 
1686 		if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1687 			ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1688 			if (ret)
1689 				goto release_srgn_table;
1690 		} else {
1691 			rgn->rgn_state = HPB_RGN_INACTIVE;
1692 		}
1693 
1694 		rgn->rgn_flags = 0;
1695 		rgn->hpb = hpb;
1696 	}
1697 
1698 	hpb->rgn_tbl = rgn_table;
1699 
1700 	return 0;
1701 
1702 release_srgn_table:
1703 	for (i = 0; i <= rgn_idx; i++)
1704 		kvfree(rgn_table[i].srgn_tbl);
1705 
1706 	kvfree(rgn_table);
1707 	return ret;
1708 }
1709 
1710 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1711 					 struct ufshpb_region *rgn)
1712 {
1713 	int srgn_idx;
1714 	struct ufshpb_subregion *srgn;
1715 
1716 	for_each_sub_region(rgn, srgn_idx, srgn)
1717 		if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1718 			srgn->srgn_state = HPB_SRGN_UNUSED;
1719 			ufshpb_put_map_ctx(hpb, srgn->mctx);
1720 		}
1721 }
1722 
1723 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1724 {
1725 	int rgn_idx;
1726 
1727 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1728 		struct ufshpb_region *rgn;
1729 
1730 		rgn = hpb->rgn_tbl + rgn_idx;
1731 		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1732 			rgn->rgn_state = HPB_RGN_INACTIVE;
1733 
1734 			ufshpb_destroy_subregion_tbl(hpb, rgn);
1735 		}
1736 
1737 		kvfree(rgn->srgn_tbl);
1738 	}
1739 
1740 	kvfree(hpb->rgn_tbl);
1741 }
1742 
1743 /* SYSFS functions - HPB statistics */
1744 #define ufshpb_sysfs_attr_show_func(__name)				\
1745 static ssize_t __name##_show(struct device *dev,			\
1746 	struct device_attribute *attr, char *buf)			\
1747 {									\
1748 	struct scsi_device *sdev = to_scsi_device(dev);			\
1749 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
1750 									\
1751 	if (!hpb)							\
1752 		return -ENODEV;						\
1753 									\
1754 	return sysfs_emit(buf, "%llu\n", hpb->stats.__name);		\
1755 }									\
1756 \
1757 static DEVICE_ATTR_RO(__name)
1758 
1759 ufshpb_sysfs_attr_show_func(hit_cnt);
1760 ufshpb_sysfs_attr_show_func(miss_cnt);
1761 ufshpb_sysfs_attr_show_func(rcmd_noti_cnt);
1762 ufshpb_sysfs_attr_show_func(rcmd_active_cnt);
1763 ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt);
1764 ufshpb_sysfs_attr_show_func(map_req_cnt);
1765 ufshpb_sysfs_attr_show_func(umap_req_cnt);
1766 
1767 static struct attribute *hpb_dev_stat_attrs[] = {
1768 	&dev_attr_hit_cnt.attr,
1769 	&dev_attr_miss_cnt.attr,
1770 	&dev_attr_rcmd_noti_cnt.attr,
1771 	&dev_attr_rcmd_active_cnt.attr,
1772 	&dev_attr_rcmd_inactive_cnt.attr,
1773 	&dev_attr_map_req_cnt.attr,
1774 	&dev_attr_umap_req_cnt.attr,
1775 	NULL,
1776 };
1777 
1778 struct attribute_group ufs_sysfs_hpb_stat_group = {
1779 	.name = "hpb_stats",
1780 	.attrs = hpb_dev_stat_attrs,
1781 };
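/*
 * The hpb_stats group exposes the counters above read-only through the
 * LU's scsi_device sysfs directory (typically something like
 * .../device/hpb_stats/hit_cnt; the exact path depends on how the SCSI
 * core registers the group).
 */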
1782 
1783 /* SYSFS functions - HPB parameters */
1784 #define ufshpb_sysfs_param_show_func(__name)				\
1785 static ssize_t __name##_show(struct device *dev,			\
1786 	struct device_attribute *attr, char *buf)			\
1787 {									\
1788 	struct scsi_device *sdev = to_scsi_device(dev);			\
1789 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
1790 									\
1791 	if (!hpb)							\
1792 		return -ENODEV;						\
1793 									\
1794 	return sysfs_emit(buf, "%d\n", hpb->params.__name);		\
1795 }
1796 
1797 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1798 static ssize_t
1799 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1800 			 const char *buf, size_t count)
1801 {
1802 	struct scsi_device *sdev = to_scsi_device(dev);
1803 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1804 	int val;
1805 
1806 	if (!hpb)
1807 		return -ENODEV;
1808 
1809 	if (kstrtouint(buf, 0, &val))
1810 		return -EINVAL;
1811 
1812 	if (val < 0)
1813 		return -EINVAL;
1814 
1815 	hpb->params.requeue_timeout_ms = val;
1816 
1817 	return count;
1818 }
1819 static DEVICE_ATTR_RW(requeue_timeout_ms);
1820 
1821 ufshpb_sysfs_param_show_func(activation_thld);
1822 static ssize_t
1823 activation_thld_store(struct device *dev, struct device_attribute *attr,
1824 		      const char *buf, size_t count)
1825 {
1826 	struct scsi_device *sdev = to_scsi_device(dev);
1827 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1828 	int val;
1829 
1830 	if (!hpb)
1831 		return -ENODEV;
1832 
1833 	if (!hpb->is_hcm)
1834 		return -EOPNOTSUPP;
1835 
1836 	if (kstrtouint(buf, 0, &val))
1837 		return -EINVAL;
1838 
1839 	if (val <= 0)
1840 		return -EINVAL;
1841 
1842 	hpb->params.activation_thld = val;
1843 
1844 	return count;
1845 }
1846 static DEVICE_ATTR_RW(activation_thld);
1847 
1848 ufshpb_sysfs_param_show_func(normalization_factor);
1849 static ssize_t
1850 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1851 			   const char *buf, size_t count)
1852 {
1853 	struct scsi_device *sdev = to_scsi_device(dev);
1854 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1855 	int val;
1856 
1857 	if (!hpb)
1858 		return -ENODEV;
1859 
1860 	if (!hpb->is_hcm)
1861 		return -EOPNOTSUPP;
1862 
1863 	if (kstrtouint(buf, 0, &val))
1864 		return -EINVAL;
1865 
1866 	if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1867 		return -EINVAL;
1868 
1869 	hpb->params.normalization_factor = val;
1870 
1871 	return count;
1872 }
1873 static DEVICE_ATTR_RW(normalization_factor);
1874 
1875 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1876 static ssize_t
1877 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1878 			  const char *buf, size_t count)
1879 {
1880 	struct scsi_device *sdev = to_scsi_device(dev);
1881 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1882 	int val;
1883 
1884 	if (!hpb)
1885 		return -ENODEV;
1886 
1887 	if (!hpb->is_hcm)
1888 		return -EOPNOTSUPP;
1889 
1890 	if (kstrtouint(buf, 0, &val))
1891 		return -EINVAL;
1892 
1893 	if (val <= hpb->params.eviction_thld_exit)
1894 		return -EINVAL;
1895 
1896 	hpb->params.eviction_thld_enter = val;
1897 
1898 	return count;
1899 }
1900 static DEVICE_ATTR_RW(eviction_thld_enter);
1901 
1902 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1903 static ssize_t
1904 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1905 			 const char *buf, size_t count)
1906 {
1907 	struct scsi_device *sdev = to_scsi_device(dev);
1908 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1909 	int val;
1910 
1911 	if (!hpb)
1912 		return -ENODEV;
1913 
1914 	if (!hpb->is_hcm)
1915 		return -EOPNOTSUPP;
1916 
1917 	if (kstrtouint(buf, 0, &val))
1918 		return -EINVAL;
1919 
1920 	if (val <= hpb->params.activation_thld)
1921 		return -EINVAL;
1922 
1923 	hpb->params.eviction_thld_exit = val;
1924 
1925 	return count;
1926 }
1927 static DEVICE_ATTR_RW(eviction_thld_exit);
1928 
1929 ufshpb_sysfs_param_show_func(read_timeout_ms);
1930 static ssize_t
1931 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1932 		      const char *buf, size_t count)
1933 {
1934 	struct scsi_device *sdev = to_scsi_device(dev);
1935 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1936 	int val;
1937 
1938 	if (!hpb)
1939 		return -ENODEV;
1940 
1941 	if (!hpb->is_hcm)
1942 		return -EOPNOTSUPP;
1943 
1944 	if (kstrtouint(buf, 0, &val))
1945 		return -EINVAL;
1946 
1947 	/* read_timeout must be at least twice timeout_polling_interval */
1948 	if (val < hpb->params.timeout_polling_interval_ms * 2)
1949 		return -EINVAL;
1950 
1951 	hpb->params.read_timeout_ms = val;
1952 
1953 	return count;
1954 }
1955 static DEVICE_ATTR_RW(read_timeout_ms);
1956 
1957 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1958 static ssize_t
1959 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1960 			    const char *buf, size_t count)
1961 {
1962 	struct scsi_device *sdev = to_scsi_device(dev);
1963 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1964 	int val;
1965 
1966 	if (!hpb)
1967 		return -ENODEV;
1968 
1969 	if (!hpb->is_hcm)
1970 		return -EOPNOTSUPP;
1971 
1972 	if (kstrtouint(buf, 0, &val))
1973 		return -EINVAL;
1974 
1975 	if (val <= 0)
1976 		return -EINVAL;
1977 
1978 	hpb->params.read_timeout_expiries = val;
1979 
1980 	return count;
1981 }
1982 static DEVICE_ATTR_RW(read_timeout_expiries);
1983 
1984 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1985 static ssize_t
1986 timeout_polling_interval_ms_store(struct device *dev,
1987 				  struct device_attribute *attr,
1988 				  const char *buf, size_t count)
1989 {
1990 	struct scsi_device *sdev = to_scsi_device(dev);
1991 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1992 	int val;
1993 
1994 	if (!hpb)
1995 		return -ENODEV;
1996 
1997 	if (!hpb->is_hcm)
1998 		return -EOPNOTSUPP;
1999 
2000 	if (kstrtouint(buf, 0, &val))
2001 		return -EINVAL;
2002 
2003 	/* timeout_polling_interval must be at most half of read_timeout */
2004 	if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
2005 		return -EINVAL;
2006 
2007 	hpb->params.timeout_polling_interval_ms = val;
2008 
2009 	return count;
2010 }
2011 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
2012 
2013 ufshpb_sysfs_param_show_func(inflight_map_req);
2014 static ssize_t inflight_map_req_store(struct device *dev,
2015 				      struct device_attribute *attr,
2016 				      const char *buf, size_t count)
2017 {
2018 	struct scsi_device *sdev = to_scsi_device(dev);
2019 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2020 	int val;
2021 
2022 	if (!hpb)
2023 		return -ENODEV;
2024 
2025 	if (!hpb->is_hcm)
2026 		return -EOPNOTSUPP;
2027 
2028 	if (kstrtouint(buf, 0, &val))
2029 		return -EINVAL;
2030 
2031 	if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
2032 		return -EINVAL;
2033 
2034 	hpb->params.inflight_map_req = val;
2035 
2036 	return count;
2037 }
2038 static DEVICE_ATTR_RW(inflight_map_req);
2039 
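/*
 * Default host-control-mode tunables. With ACTIVATION_THRESHOLD = 8 this
 * yields eviction_thld_enter = 256 and eviction_thld_exit = 128, which
 * satisfies the ordering the sysfs stores above enforce:
 * activation_thld < eviction_thld_exit < eviction_thld_enter.
 */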
2040 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2041 {
2042 	hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2043 	hpb->params.normalization_factor = 1;
2044 	hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2045 	hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2046 	hpb->params.read_timeout_ms = READ_TO_MS;
2047 	hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2048 	hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2049 	hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2050 }
2051 
2052 static struct attribute *hpb_dev_param_attrs[] = {
2053 	&dev_attr_requeue_timeout_ms.attr,
2054 	&dev_attr_activation_thld.attr,
2055 	&dev_attr_normalization_factor.attr,
2056 	&dev_attr_eviction_thld_enter.attr,
2057 	&dev_attr_eviction_thld_exit.attr,
2058 	&dev_attr_read_timeout_ms.attr,
2059 	&dev_attr_read_timeout_expiries.attr,
2060 	&dev_attr_timeout_polling_interval_ms.attr,
2061 	&dev_attr_inflight_map_req.attr,
2062 	NULL,
2063 };
2064 
2065 struct attribute_group ufs_sysfs_hpb_param_group = {
2066 	.name = "hpb_params",
2067 	.attrs = hpb_dev_param_attrs,
2068 };
2069 
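/*
 * Pre-allocate half the LU queue depth worth of pre_req structures, each
 * with its own bio and a zeroed write-buffer page, and park them on
 * lh_pre_req_free for later use by HPB pre-requests.
 */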
2070 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2071 {
2072 	struct ufshpb_req *pre_req = NULL, *t;
2073 	int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2074 	int i;
2075 
2076 	INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2077 
2078 	hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2079 	hpb->throttle_pre_req = qd;
2080 	hpb->num_inflight_pre_req = 0;
2081 
2082 	if (!hpb->pre_req)
2083 		goto release_mem;
2084 
2085 	for (i = 0; i < qd; i++) {
2086 		pre_req = hpb->pre_req + i;
2087 		INIT_LIST_HEAD(&pre_req->list_req);
2088 		pre_req->req = NULL;
2089 
2090 		pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
2091 		if (!pre_req->bio)
2092 			goto release_mem;
2093 
2094 		pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2095 		if (!pre_req->wb.m_page) {
2096 			bio_put(pre_req->bio);
2097 			goto release_mem;
2098 		}
2099 
2100 		list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2101 	}
2102 
2103 	return 0;
2104 release_mem:
2105 	list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2106 		list_del_init(&pre_req->list_req);
2107 		bio_put(pre_req->bio);
2108 		__free_page(pre_req->wb.m_page);
2109 	}
2110 
2111 	kfree(hpb->pre_req);
2112 	return -ENOMEM;
2113 }
2114 
2115 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2116 {
2117 	struct ufshpb_req *pre_req = NULL;
2118 	int i;
2119 
2120 	for (i = 0; i < hpb->throttle_pre_req; i++) {
2121 		pre_req = hpb->pre_req + i;
2122 		bio_put(hpb->pre_req[i].bio);
2123 		if (pre_req->wb.m_page)
2124 			__free_page(hpb->pre_req[i].wb.m_page);
2125 		list_del_init(&pre_req->list_req);
2126 	}
2127 
2128 	kfree(hpb->pre_req);
2129 }
2130 
2131 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2132 {
2133 	hpb->stats.hit_cnt = 0;
2134 	hpb->stats.miss_cnt = 0;
2135 	hpb->stats.rcmd_noti_cnt = 0;
2136 	hpb->stats.rcmd_active_cnt = 0;
2137 	hpb->stats.rcmd_inactive_cnt = 0;
2138 	hpb->stats.map_req_cnt = 0;
2139 	hpb->stats.umap_req_cnt = 0;
2140 }
2141 
2142 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2143 {
2144 	hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2145 	if (hpb->is_hcm)
2146 		ufshpb_hcm_param_init(hpb);
2147 }
2148 
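/*
 * Per-LU initialization: locks, lists and work items first, then the
 * map-request and map-page slab caches, the pre_req mempool and finally
 * the region table. In host control mode the read-timeout poller is
 * scheduled right away.
 */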
2149 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2150 {
2151 	int ret;
2152 
2153 	spin_lock_init(&hpb->rgn_state_lock);
2154 	spin_lock_init(&hpb->rsp_list_lock);
2155 	spin_lock_init(&hpb->param_lock);
2156 
2157 	INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2158 	INIT_LIST_HEAD(&hpb->lh_act_srgn);
2159 	INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2160 	INIT_LIST_HEAD(&hpb->list_hpb_lu);
2161 
2162 	INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2163 	if (hpb->is_hcm) {
2164 		INIT_WORK(&hpb->ufshpb_normalization_work,
2165 			  ufshpb_normalization_work_handler);
2166 		INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2167 				  ufshpb_read_to_handler);
2168 	}
2169 
2170 	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2171 			  sizeof(struct ufshpb_req), 0, 0, NULL);
2172 	if (!hpb->map_req_cache) {
2173 		dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2174 			hpb->lun);
2175 		return -ENOMEM;
2176 	}
2177 
2178 	hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2179 			  sizeof(struct page *) * hpb->pages_per_srgn,
2180 			  0, 0, NULL);
2181 	if (!hpb->m_page_cache) {
2182 		dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2183 			hpb->lun);
2184 		ret = -ENOMEM;
2185 		goto release_req_cache;
2186 	}
2187 
2188 	ret = ufshpb_pre_req_mempool_init(hpb);
2189 	if (ret) {
2190 		dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2191 			hpb->lun);
2192 		goto release_m_page_cache;
2193 	}
2194 
2195 	ret = ufshpb_alloc_region_tbl(hba, hpb);
2196 	if (ret)
2197 		goto release_pre_req_mempool;
2198 
2199 	ufshpb_stat_init(hpb);
2200 	ufshpb_param_init(hpb);
2201 
2202 	if (hpb->is_hcm) {
2203 		unsigned int poll;
2204 
2205 		poll = hpb->params.timeout_polling_interval_ms;
2206 		schedule_delayed_work(&hpb->ufshpb_read_to_work,
2207 				      msecs_to_jiffies(poll));
2208 	}
2209 
2210 	return 0;
2211 
2212 release_pre_req_mempool:
2213 	ufshpb_pre_req_mempool_destroy(hpb);
2214 release_m_page_cache:
2215 	kmem_cache_destroy(hpb->m_page_cache);
2216 release_req_cache:
2217 	kmem_cache_destroy(hpb->map_req_cache);
2218 	return ret;
2219 }
2220 
2221 static struct ufshpb_lu *
2222 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2223 		    struct ufshpb_dev_info *hpb_dev_info,
2224 		    struct ufshpb_lu_info *hpb_lu_info)
2225 {
2226 	struct ufshpb_lu *hpb;
2227 	int ret;
2228 
2229 	hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2230 	if (!hpb)
2231 		return NULL;
2232 
2233 	hpb->lun = sdev->lun;
2234 	hpb->sdev_ufs_lu = sdev;
2235 
2236 	ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2237 
2238 	ret = ufshpb_lu_hpb_init(hba, hpb);
2239 	if (ret) {
2240 		dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2241 		goto release_hpb;
2242 	}
2243 
2244 	sdev->hostdata = hpb;
2245 	return hpb;
2246 
2247 release_hpb:
2248 	kfree(hpb);
2249 	return NULL;
2250 }
2251 
2252 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2253 {
2254 	struct ufshpb_region *rgn, *next_rgn;
2255 	struct ufshpb_subregion *srgn, *next_srgn;
2256 	unsigned long flags;
2257 
2258 	/*
2259 	 * If a device reset has occurred, the remaining HPB region information
2260 	 * may be stale. Discard the HPB response lists left over from before
2261 	 * the reset so that no unnecessary work is done on them.
2262 	 */
2263 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2264 	list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2265 				 list_inact_rgn)
2266 		list_del_init(&rgn->list_inact_rgn);
2267 
2268 	list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2269 				 list_act_srgn)
2270 		list_del_init(&srgn->list_act_srgn);
2271 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2272 }
2273 
2274 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2275 {
2276 	if (hpb->is_hcm) {
2277 		cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2278 		cancel_work_sync(&hpb->ufshpb_normalization_work);
2279 	}
2280 	cancel_work_sync(&hpb->map_work);
2281 }
2282 
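/*
 * Poll the fHPBReset flag until the device clears it or we run out of
 * retries. Returns the last read flag value, i.e. false on success.
 */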
2283 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2284 {
2285 	int err = 0;
2286 	bool flag_res = true;
2287 	int try;
2288 
2289 	/* wait for the device to complete HPB reset query */
2290 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2291 		dev_dbg(hba->dev,
2292 			"%s start flag reset polling %d times\n",
2293 			__func__, try);
2294 
2295 		/* Poll fHpbReset flag to be cleared */
2296 		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2297 				QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2298 
2299 		if (err) {
2300 			dev_err(hba->dev,
2301 				"%s reading fHpbReset flag failed with error %d\n",
2302 				__func__, err);
2303 			return flag_res;
2304 		}
2305 
2306 		if (!flag_res)
2307 			goto out;
2308 
2309 		usleep_range(1000, 1100);
2310 	}
2311 	if (flag_res) {
2312 		dev_err(hba->dev,
2313 			"%s fHpbReset was not cleared by the device\n",
2314 			__func__);
2315 	}
2316 out:
2317 	return flag_res;
2318 }
2319 
2320 /**
2321  * ufshpb_toggle_state - switch HPB state of all LUs
2322  * @hba: per-adapter instance
2323  * @src: expected current HPB state
2324  * @dest: target HPB state to switch to
2325  */
2326 void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest)
2327 {
2328 	struct ufshpb_lu *hpb;
2329 	struct scsi_device *sdev;
2330 
2331 	shost_for_each_device(sdev, hba->host) {
2332 		hpb = ufshpb_get_hpb_data(sdev);
2333 
2334 		if (!hpb || ufshpb_get_state(hpb) != src)
2335 			continue;
2336 		ufshpb_set_state(hpb, dest);
2337 
2338 		if (dest == HPB_RESET) {
2339 			ufshpb_cancel_jobs(hpb);
2340 			ufshpb_discard_rsp_lists(hpb);
2341 		}
2342 	}
2343 }
2344 
2345 void ufshpb_suspend(struct ufs_hba *hba)
2346 {
2347 	struct ufshpb_lu *hpb;
2348 	struct scsi_device *sdev;
2349 
2350 	shost_for_each_device(sdev, hba->host) {
2351 		hpb = ufshpb_get_hpb_data(sdev);
2352 		if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT)
2353 			continue;
2354 
2355 		ufshpb_set_state(hpb, HPB_SUSPEND);
2356 		ufshpb_cancel_jobs(hpb);
2357 	}
2358 }
2359 
2360 void ufshpb_resume(struct ufs_hba *hba)
2361 {
2362 	struct ufshpb_lu *hpb;
2363 	struct scsi_device *sdev;
2364 
2365 	shost_for_each_device(sdev, hba->host) {
2366 		hpb = ufshpb_get_hpb_data(sdev);
2367 		if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND)
2368 			continue;
2369 
2370 		ufshpb_set_state(hpb, HPB_PRESENT);
2371 		ufshpb_kick_map_work(hpb);
2372 		if (hpb->is_hcm) {
2373 			unsigned int poll = hpb->params.timeout_polling_interval_ms;
2374 
2375 			schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll));
2376 		}
2377 	}
2378 }
2379 
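/*
 * Read the unit descriptor and extract the HPB parameters of this LU:
 * logical block count, pinned region window and maximum number of active
 * regions. Returns -ENODEV if the LU is not HPB enabled.
 */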
2380 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2381 			      struct ufshpb_lu_info *hpb_lu_info)
2382 {
2383 	u16 max_active_rgns;
2384 	u8 lu_enable;
2385 	int size;
2386 	int ret;
2387 	char desc_buf[QUERY_DESC_MAX_SIZE];
2388 
2389 	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2390 
2391 	ufshcd_rpm_get_sync(hba);
2392 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2393 					    QUERY_DESC_IDN_UNIT, lun, 0,
2394 					    desc_buf, &size);
2395 	ufshcd_rpm_put_sync(hba);
2396 
2397 	if (ret) {
2398 		dev_err(hba->dev,
2399 			"%s: idn: %d lun: %d  query request failed",
2400 			__func__, QUERY_DESC_IDN_UNIT, lun);
2401 		return ret;
2402 	}
2403 
2404 	lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2405 	if (lu_enable != LU_ENABLED_HPB_FUNC)
2406 		return -ENODEV;
2407 
2408 	max_active_rgns = get_unaligned_be16(
2409 			desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2410 	if (!max_active_rgns) {
2411 		dev_err(hba->dev,
2412 			"lun %d wrong number of max active regions\n", lun);
2413 		return -ENODEV;
2414 	}
2415 
2416 	hpb_lu_info->num_blocks = get_unaligned_be64(
2417 			desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2418 	hpb_lu_info->pinned_start = get_unaligned_be16(
2419 			desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2420 	hpb_lu_info->num_pinned = get_unaligned_be16(
2421 			desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2422 	hpb_lu_info->max_active_rgns = max_active_rgns;
2423 
2424 	return 0;
2425 }
2426 
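/*
 * Tear down a single LU: mark it HPB_FAILED, detach it from the
 * scsi_device, cancel outstanding work and release the pre_req pool,
 * region tables and slab caches.
 */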
2427 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2428 {
2429 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2430 
2431 	if (!hpb)
2432 		return;
2433 
2434 	ufshpb_set_state(hpb, HPB_FAILED);
2435 
2436 	sdev = hpb->sdev_ufs_lu;
2437 	sdev->hostdata = NULL;
2438 
2439 	ufshpb_cancel_jobs(hpb);
2440 
2441 	ufshpb_pre_req_mempool_destroy(hpb);
2442 	ufshpb_destroy_region_tbl(hpb);
2443 
2444 	kmem_cache_destroy(hpb->map_req_cache);
2445 	kmem_cache_destroy(hpb->m_page_cache);
2446 
2447 	list_del_init(&hpb->list_hpb_lu);
2448 
2449 	kfree(hpb);
2450 }
2451 
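/*
 * Called once every LU has passed through ufshpb_init_hpb_lu(). Shrinks
 * the global mempools to what the active subregions actually need and
 * either brings the LUs to HPB_PRESENT or, if fHPBReset never cleared,
 * tears HPB down again.
 */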
2452 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2453 {
2454 	int pool_size;
2455 	struct ufshpb_lu *hpb;
2456 	struct scsi_device *sdev;
2457 	bool init_success;
2458 
2459 	if (tot_active_srgn_pages == 0) {
2460 		ufshpb_remove(hba);
2461 		return;
2462 	}
2463 
2464 	init_success = !ufshpb_check_hpb_reset_query(hba);
2465 
2466 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2467 	if (pool_size > tot_active_srgn_pages) {
2468 		mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2469 		mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2470 	}
2471 
2472 	shost_for_each_device(sdev, hba->host) {
2473 		hpb = ufshpb_get_hpb_data(sdev);
2474 		if (!hpb)
2475 			continue;
2476 
2477 		if (init_success) {
2478 			ufshpb_set_state(hpb, HPB_PRESENT);
2479 			if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2480 				queue_work(ufshpb_wq, &hpb->map_work);
2481 		} else {
2482 			dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2483 			ufshpb_destroy_lu(hba, sdev);
2484 		}
2485 	}
2486 
2487 	if (!init_success)
2488 		ufshpb_remove(hba);
2489 }
2490 
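/*
 * Per-LU entry point from slave configuration. Accounts the pages this
 * LU may consume in tot_active_srgn_pages and, once the last LU has been
 * handled, finishes device-wide bring-up.
 */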
2491 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2492 {
2493 	struct ufshpb_lu *hpb;
2494 	int ret;
2495 	struct ufshpb_lu_info hpb_lu_info = { 0 };
2496 	int lun = sdev->lun;
2497 
2498 	if (lun >= hba->dev_info.max_lu_supported)
2499 		goto out;
2500 
2501 	ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2502 	if (ret)
2503 		goto out;
2504 
2505 	hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2506 				  &hpb_lu_info);
2507 	if (!hpb)
2508 		goto out;
2509 
2510 	tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2511 			hpb->srgns_per_rgn * hpb->pages_per_srgn;
2512 
2513 out:
2514 	/* All LUs are initialized */
2515 	if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2516 		ufshpb_hpb_lu_prepared(hba);
2517 }
2518 
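/*
 * Allocate the module-global resources: the map-context slab cache, the
 * two mempools sized from ufshpb_host_map_kbytes and the unbound
 * workqueue used for map work.
 */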
2519 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2520 {
2521 	int ret;
2522 	unsigned int pool_size;
2523 
2524 	ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2525 					sizeof(struct ufshpb_map_ctx),
2526 					0, 0, NULL);
2527 	if (!ufshpb_mctx_cache) {
2528 		dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2529 		return -ENOMEM;
2530 	}
2531 
2532 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2533 	dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2534 	       __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2535 
2536 	ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2537 						    ufshpb_mctx_cache);
2538 	if (!ufshpb_mctx_pool) {
2539 		dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2540 		ret = -ENOMEM;
2541 		goto release_mctx_cache;
2542 	}
2543 
2544 	ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2545 	if (!ufshpb_page_pool) {
2546 		dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2547 		ret = -ENOMEM;
2548 		goto release_mctx_pool;
2549 	}
2550 
2551 	ufshpb_wq = alloc_workqueue("ufshpb-wq",
2552 					WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2553 	if (!ufshpb_wq) {
2554 		dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2555 		ret = -ENOMEM;
2556 		goto release_page_pool;
2557 	}
2558 
2559 	return 0;
2560 
2561 release_page_pool:
2562 	mempool_destroy(ufshpb_page_pool);
2563 release_mctx_pool:
2564 	mempool_destroy(ufshpb_mctx_pool);
2565 release_mctx_cache:
2566 	kmem_cache_destroy(ufshpb_mctx_cache);
2567 	return ret;
2568 }
2569 
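/*
 * Parse the geometry descriptor. If the device reports no HPB LUs, a zero
 * region/subregion size or no active regions, HPB is disabled for this
 * host.
 */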
2570 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2571 {
2572 	struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2573 	int max_active_rgns = 0;
2574 	int hpb_num_lu;
2575 
2576 	hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2577 	if (hpb_num_lu == 0) {
2578 		dev_err(hba->dev, "No HPB LU supported\n");
2579 		hpb_info->hpb_disabled = true;
2580 		return;
2581 	}
2582 
2583 	hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2584 	hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2585 	max_active_rgns = get_unaligned_be16(geo_buf +
2586 			  GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2587 
2588 	if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2589 	    max_active_rgns == 0) {
2590 		dev_err(hba->dev, "No HPB supported device\n");
2591 		hpb_info->hpb_disabled = true;
2592 		return;
2593 	}
2594 }
2595 
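/*
 * Parse the device descriptor: HPB control mode, HPB version (1.0 is
 * treated as legacy), number of LUs and, for non-legacy devices, the
 * maximum single-command chunk size (falling back to the legacy limit if
 * the attribute query fails).
 */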
2596 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2597 {
2598 	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2599 	int version, ret;
2600 	int max_single_cmd;
2601 
2602 	hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2603 
2604 	version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2605 	if ((version != HPB_SUPPORT_VERSION) &&
2606 	    (version != HPB_SUPPORT_LEGACY_VERSION)) {
2607 		dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2608 			__func__, version);
2609 		hpb_dev_info->hpb_disabled = true;
2610 		return;
2611 	}
2612 
2613 	if (version == HPB_SUPPORT_LEGACY_VERSION)
2614 		hpb_dev_info->is_legacy = true;
2615 
2616 	/*
2617 	 * Get the number of user logical units so we can check whether all
2618 	 * scsi_device instances have finished initialization.
2619 	 */
2620 	hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2621 
2622 	if (hpb_dev_info->is_legacy)
2623 		return;
2624 
2625 	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2626 		QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
2627 
2628 	if (ret)
2629 		hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
2630 	else
2631 		hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
2632 }
2633 
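/*
 * Device-level bring-up: allocate the global pools and workqueue, record
 * how many LUs still have to finish slave configuration and issue the
 * fHPBReset flag so the device rebuilds its HPB state.
 */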
2634 void ufshpb_init(struct ufs_hba *hba)
2635 {
2636 	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2637 	int try;
2638 	int ret;
2639 
2640 	if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2641 		return;
2642 
2643 	if (ufshpb_init_mem_wq(hba)) {
2644 		hpb_dev_info->hpb_disabled = true;
2645 		return;
2646 	}
2647 
2648 	atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2649 	tot_active_srgn_pages = 0;
2650 	/* issue HPB reset query */
2651 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2652 		ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2653 					QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2654 		if (!ret)
2655 			break;
2656 	}
2657 }
2658 
2659 void ufshpb_remove(struct ufs_hba *hba)
2660 {
2661 	mempool_destroy(ufshpb_page_pool);
2662 	mempool_destroy(ufshpb_mctx_pool);
2663 	kmem_cache_destroy(ufshpb_mctx_cache);
2664 
2665 	destroy_workqueue(ufshpb_wq);
2666 }
2667 
2668 module_param(ufshpb_host_map_kbytes, uint, 0644);
2669 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2670 	"ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");
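/*
 * The mapping-memory budget can be overridden at load time, e.g. with
 * ufshpb_host_map_kbytes=4096 on the module command line (the exact
 * parameter prefix depends on which module this file is linked into).
 */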
2671