// SPDX-License-Identifier: GPL-2.0
/*
 * Universal Flash Storage Host Performance Booster
 *
 * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
 *
 *	Yongmyung Lee <ymhungry.lee@samsung.com>
 *	Jinyoung Choi <j-young.choi@samsung.com>
 */

#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <scsi/scsi_cmnd.h>

#include "ufshcd-priv.h"
#include "../../scsi/sd.h"

#define ACTIVATION_THRESHOLD		8 /* 8 IOs */
#define READ_TO_MS			1000
#define READ_TO_EXPIRIES		100
#define POLLING_INTERVAL_MS		200
#define THROTTLE_MAP_REQ_DEFAULT	1

/* memory management */
static struct kmem_cache *ufshpb_mctx_cache;
static mempool_t *ufshpb_mctx_pool;
static mempool_t *ufshpb_page_pool;
/* A cache size of 2MB can cache ppn in the 1GB range. */
static unsigned int ufshpb_host_map_kbytes = 2048;
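/*
 * Illustrative arithmetic (not from the original source), assuming the
 * HPB_ENTRY_SIZE (8 bytes) and HPB_ENTRY_BLOCK_SIZE (4 KiB) definitions
 * from ufshpb.h: 2 MiB of map cache holds 2 MiB / 8 B = 256 Ki entries,
 * each mapping one 4 KiB logical block, i.e. 256 Ki * 4 KiB = 1 GiB of
 * LBA space.
 */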
static int tot_active_srgn_pages;

static struct workqueue_struct *ufshpb_wq;

static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
				      int srgn_idx);

bool ufshpb_is_allowed(struct ufs_hba *hba)
{
	return !(hba->ufshpb_dev.hpb_disabled);
}

/* HPB version 1.0 is called the legacy version. */
bool ufshpb_is_legacy(struct ufs_hba *hba)
{
	return hba->ufshpb_dev.is_legacy;
}

static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
{
	return sdev->hostdata;
}

static int ufshpb_get_state(struct ufshpb_lu *hpb)
{
	return atomic_read(&hpb->hpb_state);
}

static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
{
	atomic_set(&hpb->hpb_state, state);
}

static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
				struct ufshpb_subregion *srgn)
{
	return rgn->rgn_state != HPB_RGN_INACTIVE &&
		srgn->srgn_state == HPB_SRGN_VALID;
}

static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
{
	return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
}

static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
{
	return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
	       op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
}
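/*
 * Note (derived from ufshpb_prep() below): writes and discards invalidate
 * the cached L2P entries for the LBAs they touch, so this check is used to
 * mark the affected subregions dirty instead of issuing an HPB READ.
 */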
static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
{
	return transfer_len <= hpb->pre_req_max_tr_len;
}

static bool ufshpb_is_general_lun(int lun)
{
	return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
}

static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
{
	return hpb->lu_pinned_end != PINNED_NOT_SET &&
		rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end;
}

static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
{
	bool ret = false;
	unsigned long flags;

	if (ufshpb_get_state(hpb) != HPB_PRESENT)
		return;

	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
		ret = true;
	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);

	if (ret)
		queue_work(ufshpb_wq, &hpb->map_work);
}

static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
				    struct ufshcd_lrb *lrbp,
				    struct utp_hpb_rsp *rsp_field)
{
	/* Check HPB_UPDATE_ALERT */
	if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
	      UPIU_HEADER_DWORD(0, 2, 0, 0)))
		return false;

	if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
	    rsp_field->desc_type != DEV_DES_TYPE ||
	    rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
	    rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
	    rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
	    rsp_field->hpb_op == HPB_RSP_NONE ||
	    (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
	     !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
		return false;

	if (!ufshpb_is_general_lun(rsp_field->lun)) {
		dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
			 rsp_field->lun);
		return false;
	}

	return true;
}
static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
			       int srgn_offset, int cnt, bool set_dirty)
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn, *prev_srgn = NULL;

	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	if (likely(!srgn->is_last))
		bitmap_len = hpb->entries_per_srgn;
	else
		bitmap_len = hpb->last_srgn_entries;

	if ((srgn_offset + cnt) > bitmap_len)
		set_bit_len = bitmap_len - srgn_offset;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	if (rgn->rgn_state != HPB_RGN_INACTIVE) {
			if (srgn->srgn_state == HPB_SRGN_VALID)
				bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
		} else if (hpb->is_hcm) {
			/* rewind the read timer for lru regions */
			rgn->read_timeout = ktime_add_ms(ktime_get(),
					rgn->hpb->params.read_timeout_ms);
			rgn->read_timeout_expiries =
				rgn->hpb->params.read_timeout_expiries;
		}
	}
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	if (hpb->is_hcm && prev_srgn != srgn) {
		bool activate = false;

		spin_lock(&rgn->rgn_lock);
			rgn->reads -= srgn->reads;
			set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
			if (srgn->reads == hpb->params.activation_thld)
		spin_unlock(&rgn->rgn_lock);

		if (activate ||
		    test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
			ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
				"activate region %d-%d\n", rgn_idx, srgn_idx);
		}
	}

	if (++srgn_idx == hpb->srgns_per_rgn) {
static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
				  int srgn_idx, int srgn_offset, int cnt)
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;

	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	if (likely(!srgn->is_last))
		bitmap_len = hpb->entries_per_srgn;
	else
		bitmap_len = hpb->last_srgn_entries;

	if (!ufshpb_is_valid_srgn(rgn, srgn))

	/*
	 * If the region state is active, mctx must be allocated.
	 * In this case, check whether the region was evicted or
	 * the mctx allocation failed.
	 */
	if (unlikely(!srgn->mctx)) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"no mctx in region %d subregion %d.\n",
			srgn->rgn_idx, srgn->srgn_idx);

	if ((srgn_offset + cnt) > bitmap_len)
		bit_len = bitmap_len - srgn_offset;

	if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
			  srgn_offset) < bit_len + srgn_offset)

	if (++srgn_idx == hpb->srgns_per_rgn) {

static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
{
	return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
}
static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
				     struct ufshpb_map_ctx *mctx, int pos,
				     int len, __be64 *ppn_buf)
	index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
	offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);

	if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
		copied = len;
	else
		copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;

	page = mctx->m_page[index];
	if (unlikely(!page)) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"error. cannot find page in mctx\n");

	memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
	       copied * HPB_ENTRY_SIZE);

static void
ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
			int *srgn_idx, int *offset)
	*rgn_idx = lpn >> hpb->entries_per_rgn_shift;
	rgn_offset = lpn & hpb->entries_per_rgn_mask;
	*srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
	*offset = rgn_offset & hpb->entries_per_srgn_mask;
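	/*
	 * Illustrative example (values assumed, not from the original
	 * source): with 512 entries per subregion and 64 subregions per
	 * region, a region holds 32768 entries, so entries_per_rgn_shift
	 * is 15 and entries_per_srgn_shift is 9. For lpn = 100000:
	 *   rgn_idx    = 100000 >> 15      = 3
	 *   rgn_offset = 100000 & 0x7fff   = 1696
	 *   srgn_idx   = 1696 >> 9         = 3
	 *   offset     = 1696 & 0x1ff      = 160
	 */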
static void
ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			    __be64 ppn, u8 transfer_len)
	unsigned char *cdb = lrbp->cmd->cmnd;
	__be64 ppn_tmp = ppn;
	cdb[0] = UFSHPB_READ;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
		ppn_tmp = (__force __be64)swab64((__force u64)ppn);

	/* ppn value is stored as big-endian in the host memory */
	memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
	cdb[14] = transfer_len;

	lrbp->cmd->cmd_len = UFS_CDB_SIZE;
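	/*
	 * Resulting HPB READ CDB layout, as built above:
	 *   byte 0     : UFSHPB_READ opcode
	 *   bytes 6-13 : PPN (cached physical address, big-endian unless the
	 *                UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ quirk
	 *                requires byte swapping)
	 *   byte 14    : TRANSFER LENGTH in logical blocks
	 */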
/*
 * This function will set up HPB read command using host-side L2P map data.
 */
int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufshpb_lu *hpb;
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	struct scsi_cmnd *cmd = lrbp->cmd;
	u32 lpn;
	__be64 ppn;
	unsigned long flags;
	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
	int err = 0;

	hpb = ufshpb_get_hpb_data(cmd->device);
	if (!hpb)
		return -ENODEV;

	if (ufshpb_get_state(hpb) == HPB_INIT)
		return -ENODEV;

	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
			   "%s: ufshpb state is not PRESENT", __func__);
		return -ENODEV;
	}

	if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
	    (!ufshpb_is_write_or_discard(cmd) &&
	     !ufshpb_is_read_cmd(cmd)))
		return 0;

	transfer_len = sectors_to_logical(cmd->device,
					  blk_rq_sectors(scsi_cmd_to_rq(cmd)));
	if (unlikely(!transfer_len))
		return 0;

	lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	/* If command type is WRITE or DISCARD, set bitmap as dirty */
	if (ufshpb_is_write_or_discard(cmd)) {
		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
				   transfer_len, true);
		return 0;
	}

	if (!ufshpb_is_supported_chunk(hpb, transfer_len))
		return 0;

	if (hpb->is_hcm) {
		/*
		 * in host control mode, reads are the main source for
		 * activation trials.
		 */
		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
				   transfer_len, false);

		/* keep those counters normalized */
		if (rgn->reads > hpb->entries_per_srgn)
			schedule_work(&hpb->ufshpb_normalization_work);
	}

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
				  transfer_len)) {
		hpb->stats.miss_cnt++;
		spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
		return 0;
	}

	err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
	if (unlikely(err < 0)) {
		/*
		 * In this case, the region state is active,
		 * but the ppn table is not allocated.
		 * The ppn table must be allocated while the region is in
		 * the active state.
		 */
		dev_err(hba->dev, "get ppn failed. err %d\n", err);
		return err;
	}

	ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);

	hpb->stats.hit_cnt++;
	return 0;
}
static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, int rgn_idx,
					 enum req_op op, bool atomic)
	struct ufshpb_req *rq;
	int retries = HPB_MAP_REQ_RETRIES;

	rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);

	req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, op,
	if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
		usleep_range(3000, 3100);

	rq->rb.rgn_idx = rgn_idx;

	kmem_cache_free(hpb->map_req_cache, rq);

static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
{
	blk_mq_free_request(rq->req);
	kmem_cache_free(hpb->map_req_cache, rq);
}

static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
					     struct ufshpb_subregion *srgn)
	struct ufshpb_req *map_req;

	    hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
			 "map_req throttle. inflight %d throttle %d",
			 hpb->num_inflight_map_req,
			 hpb->params.inflight_map_req);

	map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);

	bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
		ufshpb_put_req(hpb, map_req);

	map_req->rb.srgn_idx = srgn->srgn_idx;
	map_req->rb.mctx = srgn->mctx;

	spin_lock_irqsave(&hpb->param_lock, flags);
	hpb->num_inflight_map_req++;
	spin_unlock_irqrestore(&hpb->param_lock, flags);

static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
			       struct ufshpb_req *map_req)
	bio_put(map_req->bio);
	ufshpb_put_req(hpb, map_req);

	spin_lock_irqsave(&hpb->param_lock, flags);
	hpb->num_inflight_map_req--;
	spin_unlock_irqrestore(&hpb->param_lock, flags);
static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
				     struct ufshpb_subregion *srgn)
	struct ufshpb_region *rgn;
	u32 num_entries = hpb->entries_per_srgn;

		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"no mctx in region %d subregion %d.\n",
			srgn->rgn_idx, srgn->srgn_idx);

	if (unlikely(srgn->is_last))
		num_entries = hpb->last_srgn_entries;

	bitmap_zero(srgn->mctx->ppn_dirty, num_entries);

	rgn = hpb->rgn_tbl + srgn->rgn_idx;
	clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);

static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
				      int srgn_idx)
{
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;

	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	list_del_init(&rgn->list_inact_rgn);

	if (list_empty(&srgn->list_act_srgn))
		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);

	hpb->stats.rcmd_active_cnt++;
}

static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
{
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	int srgn_idx;

	rgn = hpb->rgn_tbl + rgn_idx;

	for_each_sub_region(rgn, srgn_idx, srgn)
		list_del_init(&srgn->list_act_srgn);

	if (list_empty(&rgn->list_inact_rgn))
		list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);

	hpb->stats.rcmd_inactive_cnt++;
}
static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
				      struct ufshpb_subregion *srgn)
	struct ufshpb_region *rgn;

	/*
	 * If there is no mctx in the subregion after the HPB_READ_BUFFER
	 * I/O has completed, the region to which the subregion belongs was
	 * evicted. Ensure the region is not evicted while its I/O is in
	 * progress.
	 */
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"no mctx in region %d subregion %d.\n",
			srgn->rgn_idx, srgn->srgn_idx);
		srgn->srgn_state = HPB_SRGN_INVALID;

	rgn = hpb->rgn_tbl + srgn->rgn_idx;

	if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"region %d subregion %d evicted\n",
			srgn->rgn_idx, srgn->srgn_idx);
		srgn->srgn_state = HPB_SRGN_INVALID;

	srgn->srgn_state = HPB_SRGN_VALID;
static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
						   blk_status_t error)
{
	struct ufshpb_req *umap_req = req->end_io_data;

	ufshpb_put_req(umap_req->hpb, umap_req);
	return RQ_END_IO_NONE;
}

static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
						  blk_status_t error)
	struct ufshpb_req *map_req = req->end_io_data;
	struct ufshpb_lu *hpb = map_req->hpb;
	struct ufshpb_subregion *srgn;

	srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
		map_req->rb.srgn_idx;

	ufshpb_clear_dirty_bitmap(hpb, srgn);
	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	ufshpb_activate_subregion(hpb, srgn);
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	ufshpb_put_map_req(map_req->hpb, map_req);
	return RQ_END_IO_NONE;

static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
	cdb[0] = UFSHPB_WRITE_BUFFER;
	cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
		       UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
	if (rgn)
		put_unaligned_be16(rgn->rgn_idx, &cdb[2]);

static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
				    int srgn_idx, int srgn_mem_size)
	cdb[0] = UFSHPB_READ_BUFFER;
	cdb[1] = UFSHPB_READ_BUFFER_ID;

	put_unaligned_be16(rgn_idx, &cdb[2]);
	put_unaligned_be16(srgn_idx, &cdb[4]);
	put_unaligned_be24(srgn_mem_size, &cdb[6]);
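	/*
	 * Resulting HPB READ BUFFER CDB layout, as built above:
	 *   byte 0    : UFSHPB_READ_BUFFER opcode
	 *   byte 1    : buffer ID
	 *   bytes 2-3 : region index (big-endian)
	 *   bytes 4-5 : subregion index (big-endian)
	 *   bytes 6-8 : allocation length, i.e. the subregion map size
	 */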
static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
				    struct ufshpb_req *umap_req,
				    struct ufshpb_region *rgn)
	struct request *req = umap_req->req;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);

	req->end_io_data = umap_req;
	req->end_io = ufshpb_umap_req_compl_fn;

	ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
	scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;

	blk_execute_rq_nowait(req, true);

	hpb->stats.umap_req_cnt++;

static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
				  struct ufshpb_req *map_req, bool last)
	struct request_queue *q;
	struct scsi_cmnd *scmd;
	int mem_size = hpb->srgn_mem_size;

	q = hpb->sdev_ufs_lu->request_queue;
	for (i = 0; i < hpb->pages_per_srgn; i++) {
		ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
				      PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
				"bio_add_pc_page fail %d - %d\n",
				map_req->rb.rgn_idx, map_req->rb.srgn_idx);

	blk_rq_append_bio(req, map_req->bio);

	req->end_io_data = map_req;
	req->end_io = ufshpb_map_req_compl_fn;

	if (unlikely(last))
		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;

	scmd = blk_mq_rq_to_pdu(req);
	ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
				map_req->rb.srgn_idx, mem_size);
	scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;

	blk_execute_rq_nowait(req, true);

	hpb->stats.map_req_cnt++;
static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
						 bool last)
	struct ufshpb_map_ctx *mctx;
	u32 num_entries = hpb->entries_per_srgn;

	mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);

	mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);

	if (last)
		num_entries = hpb->last_srgn_entries;

	mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
	if (!mctx->ppn_dirty)

	for (i = 0; i < hpb->pages_per_srgn; i++) {
		mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
		if (!mctx->m_page[i]) {
			for (j = 0; j < i; j++)
				mempool_free(mctx->m_page[j], ufshpb_page_pool);
			goto release_ppn_dirty;
		}
		clear_page(page_address(mctx->m_page[i]));
	}

release_ppn_dirty:
	bitmap_free(mctx->ppn_dirty);
	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
	mempool_free(mctx, ufshpb_mctx_pool);

static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
			       struct ufshpb_map_ctx *mctx)
	for (i = 0; i < hpb->pages_per_srgn; i++)
		mempool_free(mctx->m_page[i], ufshpb_page_pool);

	bitmap_free(mctx->ppn_dirty);
	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
	mempool_free(mctx, ufshpb_mctx_pool);

static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
					  struct ufshpb_region *rgn)
	struct ufshpb_subregion *srgn;

	for_each_sub_region(rgn, srgn_idx, srgn)
		if (srgn->srgn_state == HPB_SRGN_ISSUED)
static void ufshpb_read_to_handler(struct work_struct *work)
	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
					     ufshpb_read_to_work.work);
	struct victim_select_info *lru_info = &hpb->lru_info;
	struct ufshpb_region *rgn, *next_rgn;
	LIST_HEAD(expired_list);

	if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
		return;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);

	list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
				 list_lru_rgn) {
		bool timedout = ktime_after(ktime_get(), rgn->read_timeout);

		if (timedout) {
			rgn->read_timeout_expiries--;
			if (is_rgn_dirty(rgn) ||
			    rgn->read_timeout_expiries == 0)
				list_add(&rgn->list_expired_rgn, &expired_list);
			else
				rgn->read_timeout = ktime_add_ms(ktime_get(),
						hpb->params.read_timeout_ms);
		}
	}

	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	list_for_each_entry_safe(rgn, next_rgn, &expired_list,
				 list_expired_rgn) {
		list_del_init(&rgn->list_expired_rgn);
		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
	}

	ufshpb_kick_map_work(hpb);

	clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);

	poll = hpb->params.timeout_polling_interval_ms;
	schedule_delayed_work(&hpb->ufshpb_read_to_work,
			      msecs_to_jiffies(poll));
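	/*
	 * Summary of the flow above: every timeout_polling_interval_ms,
	 * regions whose read timer expired either get the timer re-armed
	 * (still clean, expiries left) or, once dirty or out of expiries,
	 * are queued for inactivation and the map work is kicked.
	 */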
static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
				struct ufshpb_region *rgn)
{
	rgn->rgn_state = HPB_RGN_ACTIVE;
	list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
	atomic_inc(&lru_info->active_cnt);
	if (rgn->hpb->is_hcm) {
		rgn->read_timeout =
			ktime_add_ms(ktime_get(),
				     rgn->hpb->params.read_timeout_ms);
		rgn->read_timeout_expiries =
			rgn->hpb->params.read_timeout_expiries;
	}
}

static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
				struct ufshpb_region *rgn)
{
	list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
}

static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
	struct victim_select_info *lru_info = &hpb->lru_info;
	struct ufshpb_region *rgn, *victim_rgn = NULL;

	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
		if (ufshpb_check_srgns_issue_state(hpb, rgn))

		/*
		 * in host control mode, verify that the exiting region
		 * has few enough reads (below eviction_thld_exit)
		 */
		if (hpb->is_hcm &&
		    rgn->reads > hpb->params.eviction_thld_exit)

		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"%s: no region allocated\n",

static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
				    struct ufshpb_region *rgn)
{
	list_del_init(&rgn->list_lru_rgn);
	rgn->rgn_state = HPB_RGN_INACTIVE;
	atomic_dec(&lru_info->active_cnt);
}

static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
					  struct ufshpb_subregion *srgn)
	if (srgn->srgn_state != HPB_SRGN_UNUSED) {
		ufshpb_put_map_ctx(hpb, srgn->mctx);
		srgn->srgn_state = HPB_SRGN_UNUSED;
static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
				 struct ufshpb_region *rgn,
				 bool atomic)
{
	struct ufshpb_req *umap_req;
	int rgn_idx = rgn ? rgn->rgn_idx : 0;
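	/*
	 * Note (derived from ufshpb_set_unmap_cmd() above): a NULL rgn
	 * requests inactivation of all regions (INACT_ALL_ID), while a
	 * non-NULL rgn inactivates only that single region.
	 */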
	umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
	if (!umap_req)
		return -ENOMEM;

	ufshpb_execute_umap_req(hpb, umap_req, rgn);

	return 0;
}

static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
					struct ufshpb_region *rgn)
{
	return ufshpb_issue_umap_req(hpb, rgn, true);
}

static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
				  struct ufshpb_region *rgn)
	struct victim_select_info *lru_info;
	struct ufshpb_subregion *srgn;

	lru_info = &hpb->lru_info;

	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);

	ufshpb_cleanup_lru_info(lru_info, rgn);

	for_each_sub_region(rgn, srgn_idx, srgn)
		ufshpb_purge_active_subregion(hpb, srgn);

static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	if (rgn->rgn_state == HPB_RGN_PINNED) {
		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
			 "pinned region cannot drop-out. region %d\n",
			 rgn->rgn_idx);

	if (!list_empty(&rgn->list_lru_rgn)) {
		if (ufshpb_check_srgns_issue_state(hpb, rgn)) {

		if (hpb->is_hcm) {
			spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
			ret = ufshpb_issue_umap_single_req(hpb, rgn);
			spin_lock_irqsave(&hpb->rgn_state_lock, flags);
		}

		__ufshpb_evict_region(hpb, rgn);
	}

	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
				struct ufshpb_region *rgn,
				struct ufshpb_subregion *srgn)
	struct ufshpb_req *map_req;
	bool alloc_required = false;
	enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);

	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
			   "%s: ufshpb state is not PRESENT\n", __func__);

	if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
	    (srgn->srgn_state == HPB_SRGN_INVALID)) {

	if (srgn->srgn_state == HPB_SRGN_UNUSED)
		alloc_required = true;

	/*
	 * If the subregion is already in the ISSUED state, a device-side
	 * event (e.g., GC or wear-leveling) occurred and an HPB response
	 * for map loading was received. In that case, after the current
	 * HPB_READ_BUFFER finishes, another HPB_READ_BUFFER is issued to
	 * obtain the latest map data.
	 */
	if (srgn->srgn_state == HPB_SRGN_ISSUED)

	srgn->srgn_state = HPB_SRGN_ISSUED;
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	if (alloc_required) {
		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
		if (!srgn->mctx) {
			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
				"get map_ctx failed. region %d - %d\n",
				rgn->rgn_idx, srgn->srgn_idx);
			state = HPB_SRGN_UNUSED;
			goto change_srgn_state;
		}
	}

	map_req = ufshpb_get_map_req(hpb, srgn);
	if (!map_req)
		goto change_srgn_state;

	ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
	if (ret) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"%s: issue map_req failed: %d, region %d - %d\n",
			__func__, ret, srgn->rgn_idx, srgn->srgn_idx);

	ufshpb_put_map_req(hpb, map_req);
change_srgn_state:
	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	srgn->srgn_state = state;
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
	struct ufshpb_region *victim_rgn = NULL;
	struct victim_select_info *lru_info = &hpb->lru_info;
	unsigned long flags;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	/*
	 * If the region already belongs to the LRU list, just move it to
	 * the most-recently-used end, because it is already in the
	 * active state.
	 */
	if (!list_empty(&rgn->list_lru_rgn)) {
		ufshpb_hit_lru_info(lru_info, rgn);

	if (rgn->rgn_state == HPB_RGN_INACTIVE) {
		if (atomic_read(&lru_info->active_cnt) ==
		    lru_info->max_lru_active_cnt) {
			/*
			 * If the maximum number of active regions
			 * is exceeded, evict the least recently used region.
			 * This case may occur when the device responds
			 * to the eviction information late.
			 * It is okay to evict the least recently used region,
			 * because the device can detect that the region was
			 * evicted once the host stops issuing HPB_READ for it.
			 *
			 * in host control mode, verify that the entering
			 * region has enough reads
			 */
			if (hpb->is_hcm &&
			    rgn->reads < hpb->params.eviction_thld_enter) {

			victim_rgn = ufshpb_victim_lru_info(hpb);
				dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
					 "cannot get victim region %s\n",
					 hpb->is_hcm ? "" : "error");

			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
				"LRU full (%d), choose victim %d\n",
				atomic_read(&lru_info->active_cnt),
				victim_rgn->rgn_idx);

				spin_unlock_irqrestore(&hpb->rgn_state_lock,
						       flags);
				ret = ufshpb_issue_umap_single_req(hpb,
								   victim_rgn);
				spin_lock_irqsave(&hpb->rgn_state_lock,
						  flags);

			__ufshpb_evict_region(hpb, victim_rgn);
		}

		/*
		 * When a region is added to the lru_info list_head, it is
		 * guaranteed that all of its subregions have been assigned
		 * mctx. If that failed, try to obtain the mctx again,
		 * without the region being added to the lru_info list_head.
		 */
		ufshpb_add_lru_info(lru_info, rgn);
	}

	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
/**
 * ufshpb_submit_region_inactive() - submit a region to be inactivated later
 * @hpb: per-LU HPB instance
 * @region_index: the index of the region that will be inactivated later
 */
static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index)
	int subregion_index;
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;

	/*
	 * Remove this region from the active region list and add it to the
	 * inactive list.
	 */
	spin_lock(&hpb->rsp_list_lock);
	ufshpb_update_inactive_info(hpb, region_index);
	spin_unlock(&hpb->rsp_list_lock);

	rgn = hpb->rgn_tbl + region_index;

	/*
	 * Set the subregion state to HPB_SRGN_INVALID; there will be no
	 * HPB READ on this subregion.
	 */
	spin_lock(&hpb->rgn_state_lock);
	if (rgn->rgn_state != HPB_RGN_INACTIVE) {
		for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) {
			srgn = rgn->srgn_tbl + subregion_index;
			if (srgn->srgn_state == HPB_SRGN_VALID)
				srgn->srgn_state = HPB_SRGN_INVALID;
		}
	}
	spin_unlock(&hpb->rgn_state_lock);
static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
					 struct utp_hpb_rsp *rsp_field)
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	int i, rgn_i, srgn_i;

	BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
	/*
	 * If the active region and the inactive region are the same,
	 * we will inactivate this region.
	 * The device can check this (region inactivated) and will
	 * respond with the proper active region information.
	 */
	for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
		rgn_i =
			be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
		srgn_i =
			be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);

		rgn = hpb->rgn_tbl + rgn_i;
		if (hpb->is_hcm &&
		    (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
			/*
			 * in host control mode, subregion activation
			 * recommendations are only allowed for active
			 * regions. Also, ignore recommendations for dirty
			 * regions - the host will make decisions concerning
			 * those by itself.
			 */
			continue;
		}

		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
			"activate(%d) region %d - %d\n", i, rgn_i, srgn_i);

		spin_lock(&hpb->rsp_list_lock);
		ufshpb_update_active_info(hpb, rgn_i, srgn_i);
		spin_unlock(&hpb->rsp_list_lock);

		srgn = rgn->srgn_tbl + srgn_i;

		/* blocking HPB_READ */
		spin_lock(&hpb->rgn_state_lock);
		if (srgn->srgn_state == HPB_SRGN_VALID)
			srgn->srgn_state = HPB_SRGN_INVALID;
		spin_unlock(&hpb->rgn_state_lock);
	}

	/*
	 * in host control mode the device is not allowed to inactivate
	 * regions.
	 */
	if (hpb->is_hcm)
		goto out;

	for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
		rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i);
		ufshpb_submit_region_inactive(hpb, rgn_i);
	}

out:
	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
		rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);

	if (ufshpb_get_state(hpb) == HPB_PRESENT)
		queue_work(ufshpb_wq, &hpb->map_work);
/*
 * Set the flags of all active regions to RGN_FLAG_UPDATE so that the host
 * side reloads their L2P entries later.
 */
static void ufshpb_set_regions_update(struct ufshpb_lu *hpb)
{
	struct victim_select_info *lru_info = &hpb->lru_info;
	struct ufshpb_region *rgn;
	unsigned long flags;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);

	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
		set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);

	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
}

static void ufshpb_dev_reset_handler(struct ufs_hba *hba)
	struct scsi_device *sdev;
	struct ufshpb_lu *hpb;

	__shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);
		if (!hpb)
			continue;

		if (hpb->is_hcm) {
			/*
			 * For the HPB host control mode, in case the device
			 * powered up and lost HPB information, set the region
			 * flags to RGN_FLAG_UPDATE; this lets the host reload
			 * its L2P entries (reactivating the regions in the
			 * UFS device).
			 */
			ufshpb_set_regions_update(hpb);
		} else {
			/*
			 * For the HPB device control mode, if the host
			 * receives 02h:HPB Operation in the UPIU response,
			 * the device recommends that the host inactivate all
			 * active regions. Add all active regions to the
			 * inactive list; they will be inactivated later in
			 * ufshpb_map_work_handler().
			 */
			struct victim_select_info *lru_info = &hpb->lru_info;
			struct ufshpb_region *rgn;

			list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
				ufshpb_submit_region_inactive(hpb, rgn->rgn_idx);
		}

		if (ufshpb_get_state(hpb) == HPB_PRESENT)
			queue_work(ufshpb_wq, &hpb->map_work);
	}
/*
 * This function will parse the recommended active subregion information in
 * the sense data field of a response UPIU with SAM_STAT_GOOD state.
 */
void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
	struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
	int data_seg_len;

	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
		& MASK_RSP_UPIU_DATA_SEG_LEN;

	/* If data segment length is zero, rsp_field is not valid */
	if (!data_seg_len)
		return;

	if (unlikely(lrbp->lun != rsp_field->lun)) {
		struct scsi_device *sdev;

		__shost_for_each_device(sdev, hba->host) {
			hpb = ufshpb_get_hpb_data(sdev);

			if (rsp_field->lun == hpb->lun) {

	if (ufshpb_get_state(hpb) == HPB_INIT)
		return;

	if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
	    (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
			   "%s: ufshpb state is not PRESENT/SUSPEND\n",
			   __func__);
		return;
	}

	BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);

	if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
		return;

	hpb->stats.rcmd_noti_cnt++;

	switch (rsp_field->hpb_op) {
	case HPB_RSP_REQ_REGION_UPDATE:
		if (data_seg_len != DEV_DATA_SEG_LEN)
			dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
				 "%s: data segment length mismatch.\n",
				 __func__);
		ufshpb_rsp_req_region_update(hpb, rsp_field);
		break;
	case HPB_RSP_DEV_RESET:
		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
			 "UFS device lost HPB information during PM.\n");
		ufshpb_dev_reset_handler(hba);
		break;
	default:
		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
			   "hpb_op is not available: %d\n",
			   rsp_field->hpb_op);
static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
				   struct ufshpb_region *rgn,
				   struct ufshpb_subregion *srgn)
{
	if (!list_empty(&rgn->list_inact_rgn))
		return;

	if (!list_empty(&srgn->list_act_srgn)) {
		list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
		return;
	}

	list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
}

static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
					  struct ufshpb_region *rgn,
					  struct list_head *pending_list)
{
	struct ufshpb_subregion *srgn;
	int srgn_idx;

	if (!list_empty(&rgn->list_inact_rgn))
		return;

	for_each_sub_region(rgn, srgn_idx, srgn)
		if (!list_empty(&srgn->list_act_srgn))
			return;

	list_add_tail(&rgn->list_inact_rgn, pending_list);
}

static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
{
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
						struct ufshpb_subregion,
						list_act_srgn))) {
		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
			break;

		list_del_init(&srgn->list_act_srgn);
		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);

		rgn = hpb->rgn_tbl + srgn->rgn_idx;
		ret = ufshpb_add_region(hpb, rgn);
		if (ret)
			goto active_failed;

		ret = ufshpb_issue_map_req(hpb, rgn, srgn);
		if (ret) {
			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
				"issue map_req failed. ret %d, region %d - %d\n",
				ret, rgn->rgn_idx, srgn->srgn_idx);
			goto active_failed;
		}

		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	}
	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
	return;

active_failed:
	dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
		rgn->rgn_idx, srgn->srgn_idx);
	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	ufshpb_add_active_list(hpb, rgn, srgn);
	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
}
static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
{
	struct ufshpb_region *rgn;
	unsigned long flags;
	int ret;
	LIST_HEAD(pending_list);

	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
					       struct ufshpb_region,
					       list_inact_rgn))) {
		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
			break;

		list_del_init(&rgn->list_inact_rgn);
		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);

		ret = ufshpb_evict_region(hpb, rgn);
		if (ret) {
			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
			ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
		}

		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	}

	list_splice(&pending_list, &hpb->lh_inact_rgn);
	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
}
static void ufshpb_normalization_work_handler(struct work_struct *work)
{
	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
					     ufshpb_normalization_work);
	int rgn_idx;
	u8 factor = hpb->params.normalization_factor;

	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
		struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
		int srgn_idx;

		spin_lock(&rgn->rgn_lock);
		rgn->reads = 0;
		for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
			struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;

			srgn->reads >>= factor;
			rgn->reads += srgn->reads;
		}
		spin_unlock(&rgn->rgn_lock);

		if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
			continue;

		/* if region is active but has no reads - inactivate it */
		spin_lock(&hpb->rsp_list_lock);
		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
		spin_unlock(&hpb->rsp_list_lock);
	}
}

static void ufshpb_map_work_handler(struct work_struct *work)
{
	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);

	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
			   "%s: ufshpb state is not PRESENT\n", __func__);
		return;
	}

	ufshpb_run_inactive_region_list(hpb);
	ufshpb_run_active_subregion_list(hpb);
}
/*
 * This function doesn't need to hold the locks (rgn_state_lock,
 * rsp_list_lock, etc.) because it is only called during initialization.
 */
static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
					    struct ufshpb_lu *hpb,
					    struct ufshpb_region *rgn)
	struct ufshpb_subregion *srgn;

	for_each_sub_region(rgn, srgn_idx, srgn) {
		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
		srgn->srgn_state = HPB_SRGN_INVALID;
		if (!srgn->mctx) {
			dev_err(hba->dev,
				"alloc mctx for pinned region failed\n");

		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
	}

	rgn->rgn_state = HPB_RGN_PINNED;

	for (i = 0; i < srgn_idx; i++) {
		srgn = rgn->srgn_tbl + i;
		ufshpb_put_map_ctx(hpb, srgn->mctx);
	}

static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
				      struct ufshpb_region *rgn, bool last)
{
	int srgn_idx;
	struct ufshpb_subregion *srgn;

	for_each_sub_region(rgn, srgn_idx, srgn) {
		INIT_LIST_HEAD(&srgn->list_act_srgn);

		srgn->rgn_idx = rgn->rgn_idx;
		srgn->srgn_idx = srgn_idx;
		srgn->srgn_state = HPB_SRGN_UNUSED;
	}

	if (unlikely(last && hpb->last_srgn_entries))
		srgn->is_last = true;
}

static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
				      struct ufshpb_region *rgn, int srgn_cnt)
{
	rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
				 GFP_KERNEL);
	if (!rgn->srgn_tbl)
		return -ENOMEM;

	rgn->srgn_cnt = srgn_cnt;

	return 0;
}
static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
				     struct ufshpb_lu *hpb,
				     struct ufshpb_dev_info *hpb_dev_info,
				     struct ufshpb_lu_info *hpb_lu_info)
	u32 entries_per_rgn;
	u64 rgn_mem_size, tmp;

	if (ufshpb_is_legacy(hba))
		hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
	else
		hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;

	hpb->lu_pinned_start = hpb_lu_info->pinned_start;
	hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
		(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1) :
		PINNED_NOT_SET;
	hpb->lru_info.max_lru_active_cnt =
		hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;

	rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
			* HPB_ENTRY_SIZE;
	do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
	hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
		* HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;

	tmp = rgn_mem_size;
	do_div(tmp, HPB_ENTRY_SIZE);
	entries_per_rgn = (u32)tmp;
	hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
	hpb->entries_per_rgn_mask = entries_per_rgn - 1;

	hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
	hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
	hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;

	tmp = rgn_mem_size;
	do_div(tmp, hpb->srgn_mem_size);
	hpb->srgns_per_rgn = (int)tmp;

	hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
					entries_per_rgn);
	hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
					 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
	hpb->last_srgn_entries = hpb_lu_info->num_blocks
				 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);

	hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
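	/*
	 * Illustrative sizing (values assumed, not from the original
	 * source): for a 32 MiB region and a 4 MiB subregion, and assuming
	 * the ufshpb.h units HPB_ENTRY_SIZE = 8 B and HPB_ENTRY_BLOCK_SIZE
	 * = 4 KiB: srgn_mem_size = 4 MiB / 4 KiB * 8 B = 8 KiB, so
	 * entries_per_srgn = 1024 (shift 10, mask 0x3ff), a region holds
	 * 8 subregions, and entries_per_rgn = 8192 (shift 13).
	 */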
	if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
		hpb->is_hcm = true;

static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
	struct ufshpb_region *rgn_table, *rgn;

	rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
			     GFP_KERNEL);
	if (!rgn_table)
		return -ENOMEM;

	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
		int srgn_cnt = hpb->srgns_per_rgn;
		bool last_srgn = false;

		rgn = rgn_table + rgn_idx;
		rgn->rgn_idx = rgn_idx;

		spin_lock_init(&rgn->rgn_lock);

		INIT_LIST_HEAD(&rgn->list_inact_rgn);
		INIT_LIST_HEAD(&rgn->list_lru_rgn);
		INIT_LIST_HEAD(&rgn->list_expired_rgn);

		if (rgn_idx == hpb->rgns_per_lu - 1) {
			srgn_cnt = ((hpb->srgns_per_lu - 1) %
				    hpb->srgns_per_rgn) + 1;
			last_srgn = true;
		}

		ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
		if (ret)
			goto release_srgn_table;
		ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);

		if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
			ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
			if (ret)
				goto release_srgn_table;
		} else {
			rgn->rgn_state = HPB_RGN_INACTIVE;
		}
	}

	hpb->rgn_tbl = rgn_table;

	return 0;

release_srgn_table:
	for (i = 0; i <= rgn_idx; i++)
		kvfree(rgn_table[i].srgn_tbl);

	kvfree(rgn_table);
	return ret;
static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
					 struct ufshpb_region *rgn)
	struct ufshpb_subregion *srgn;

	for_each_sub_region(rgn, srgn_idx, srgn)
		if (srgn->srgn_state != HPB_SRGN_UNUSED) {
			srgn->srgn_state = HPB_SRGN_UNUSED;
			ufshpb_put_map_ctx(hpb, srgn->mctx);
		}

static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
		struct ufshpb_region *rgn;

		rgn = hpb->rgn_tbl + rgn_idx;
		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
			rgn->rgn_state = HPB_RGN_INACTIVE;

			ufshpb_destroy_subregion_tbl(hpb, rgn);
		}

		kvfree(rgn->srgn_tbl);
	}

	kvfree(hpb->rgn_tbl);
/* SYSFS functions */
#define ufshpb_sysfs_attr_show_func(__name)				\
static ssize_t __name##_show(struct device *dev,			\
			     struct device_attribute *attr, char *buf)	\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
									\
	if (!hpb)							\
		return -ENODEV;						\
									\
	return sysfs_emit(buf, "%llu\n", hpb->stats.__name);		\
}									\
static DEVICE_ATTR_RO(__name)

ufshpb_sysfs_attr_show_func(hit_cnt);
ufshpb_sysfs_attr_show_func(miss_cnt);
ufshpb_sysfs_attr_show_func(rcmd_noti_cnt);
ufshpb_sysfs_attr_show_func(rcmd_active_cnt);
ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt);
ufshpb_sysfs_attr_show_func(map_req_cnt);
ufshpb_sysfs_attr_show_func(umap_req_cnt);

static struct attribute *hpb_dev_stat_attrs[] = {
	&dev_attr_hit_cnt.attr,
	&dev_attr_miss_cnt.attr,
	&dev_attr_rcmd_noti_cnt.attr,
	&dev_attr_rcmd_active_cnt.attr,
	&dev_attr_rcmd_inactive_cnt.attr,
	&dev_attr_map_req_cnt.attr,
	&dev_attr_umap_req_cnt.attr,
	NULL,
};

struct attribute_group ufs_sysfs_hpb_stat_group = {
	.name = "hpb_stats",
	.attrs = hpb_dev_stat_attrs,
};
/* SYSFS functions */
#define ufshpb_sysfs_param_show_func(__name)				\
static ssize_t __name##_show(struct device *dev,			\
			     struct device_attribute *attr, char *buf)	\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
									\
	if (!hpb)							\
		return -ENODEV;						\
									\
	return sysfs_emit(buf, "%d\n", hpb->params.__name);		\
}

ufshpb_sysfs_param_show_func(requeue_timeout_ms);
static ssize_t
requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	hpb->params.requeue_timeout_ms = val;

static DEVICE_ATTR_RW(requeue_timeout_ms);

ufshpb_sysfs_param_show_func(activation_thld);
static ssize_t
activation_thld_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	hpb->params.activation_thld = val;

static DEVICE_ATTR_RW(activation_thld);

ufshpb_sysfs_param_show_func(normalization_factor);
static ssize_t
normalization_factor_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
		return -EINVAL;

	hpb->params.normalization_factor = val;

static DEVICE_ATTR_RW(normalization_factor);
ufshpb_sysfs_param_show_func(eviction_thld_enter);
static ssize_t
eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	if (val <= hpb->params.eviction_thld_exit)
		return -EINVAL;

	hpb->params.eviction_thld_enter = val;

static DEVICE_ATTR_RW(eviction_thld_enter);

ufshpb_sysfs_param_show_func(eviction_thld_exit);
static ssize_t
eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	if (val <= hpb->params.activation_thld)
		return -EINVAL;

	hpb->params.eviction_thld_exit = val;

static DEVICE_ATTR_RW(eviction_thld_exit);
ufshpb_sysfs_param_show_func(read_timeout_ms);
static ssize_t
read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	/* read_timeout >> timeout_polling_interval */
	if (val < hpb->params.timeout_polling_interval_ms * 2)
		return -EINVAL;

	hpb->params.read_timeout_ms = val;

static DEVICE_ATTR_RW(read_timeout_ms);

ufshpb_sysfs_param_show_func(read_timeout_expiries);
static ssize_t
read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	hpb->params.read_timeout_expiries = val;

static DEVICE_ATTR_RW(read_timeout_expiries);

ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
static ssize_t
timeout_polling_interval_ms_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	/* timeout_polling_interval << read_timeout */
	if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
		return -EINVAL;

	hpb->params.timeout_polling_interval_ms = val;

static DEVICE_ATTR_RW(timeout_polling_interval_ms);
ufshpb_sysfs_param_show_func(inflight_map_req);
static ssize_t inflight_map_req_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (kstrtouint(buf, 0, &val))
		return -EINVAL;

	if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
		return -EINVAL;

	hpb->params.inflight_map_req = val;

static DEVICE_ATTR_RW(inflight_map_req);

static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
{
	hpb->params.activation_thld = ACTIVATION_THRESHOLD;
	hpb->params.normalization_factor = 1;
	hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
	hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
	hpb->params.read_timeout_ms = READ_TO_MS;
	hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
	hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
	hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
}
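/*
 * With the defaults above: a region becomes an activation candidate after
 * 8 reads, enters a full LRU only with at least 256 reads
 * (ACTIVATION_THRESHOLD << 5), and a victim must be below 128 reads
 * (ACTIVATION_THRESHOLD << 4); read timers run for 1000 ms, are checked
 * every 200 ms, and a region survives at most 100 expiries before it is
 * inactivated.
 */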
static struct attribute *hpb_dev_param_attrs[] = {
	&dev_attr_requeue_timeout_ms.attr,
	&dev_attr_activation_thld.attr,
	&dev_attr_normalization_factor.attr,
	&dev_attr_eviction_thld_enter.attr,
	&dev_attr_eviction_thld_exit.attr,
	&dev_attr_read_timeout_ms.attr,
	&dev_attr_read_timeout_expiries.attr,
	&dev_attr_timeout_polling_interval_ms.attr,
	&dev_attr_inflight_map_req.attr,
	NULL,
};

struct attribute_group ufs_sysfs_hpb_param_group = {
	.name = "hpb_params",
	.attrs = hpb_dev_param_attrs,
};
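/*
 * Illustrative usage (path assumed, not from the original source): these
 * groups appear under each UFS LU's scsi_device sysfs directory, e.g.
 *   cat /sys/class/scsi_device/0:0:0:0/device/hpb_params/activation_thld
 *   echo 16 > /sys/class/scsi_device/0:0:0:0/device/hpb_params/activation_thld
 * while hpb_stats/ exposes the hit/miss and map request counters read-only.
 */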
static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
	struct ufshpb_req *pre_req = NULL, *t;
	int qd = hpb->sdev_ufs_lu->queue_depth / 2;

	INIT_LIST_HEAD(&hpb->lh_pre_req_free);

	hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
	hpb->throttle_pre_req = qd;
	hpb->num_inflight_pre_req = 0;

	for (i = 0; i < qd; i++) {
		pre_req = hpb->pre_req + i;
		INIT_LIST_HEAD(&pre_req->list_req);
		pre_req->req = NULL;

		pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);

		pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!pre_req->wb.m_page) {
			bio_put(pre_req->bio);

		list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
	}

	list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
		list_del_init(&pre_req->list_req);
		bio_put(pre_req->bio);
		__free_page(pre_req->wb.m_page);
	}

	kfree(hpb->pre_req);

static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
	struct ufshpb_req *pre_req = NULL;

	for (i = 0; i < hpb->throttle_pre_req; i++) {
		pre_req = hpb->pre_req + i;
		bio_put(hpb->pre_req[i].bio);
		if (pre_req->wb.m_page)
			__free_page(hpb->pre_req[i].wb.m_page);
		list_del_init(&pre_req->list_req);
	}

	kfree(hpb->pre_req);
static void ufshpb_stat_init(struct ufshpb_lu *hpb)
{
	hpb->stats.hit_cnt = 0;
	hpb->stats.miss_cnt = 0;
	hpb->stats.rcmd_noti_cnt = 0;
	hpb->stats.rcmd_active_cnt = 0;
	hpb->stats.rcmd_inactive_cnt = 0;
	hpb->stats.map_req_cnt = 0;
	hpb->stats.umap_req_cnt = 0;
}

static void ufshpb_param_init(struct ufshpb_lu *hpb)
{
	hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
	if (hpb->is_hcm)
		ufshpb_hcm_param_init(hpb);
}
static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
	spin_lock_init(&hpb->rgn_state_lock);
	spin_lock_init(&hpb->rsp_list_lock);
	spin_lock_init(&hpb->param_lock);

	INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
	INIT_LIST_HEAD(&hpb->lh_act_srgn);
	INIT_LIST_HEAD(&hpb->lh_inact_rgn);
	INIT_LIST_HEAD(&hpb->list_hpb_lu);

	INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
	if (hpb->is_hcm) {
		INIT_WORK(&hpb->ufshpb_normalization_work,
			  ufshpb_normalization_work_handler);
		INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
				  ufshpb_read_to_handler);
	}

	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
					       sizeof(struct ufshpb_req), 0, 0, NULL);
	if (!hpb->map_req_cache) {
		dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
			hpb->lun);

	hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
					      sizeof(struct page *) * hpb->pages_per_srgn,
	if (!hpb->m_page_cache) {
		dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
			hpb->lun);
		goto release_req_cache;
	}

	ret = ufshpb_pre_req_mempool_init(hpb);
	if (ret) {
		dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
			hpb->lun);
		goto release_m_page_cache;
	}

	ret = ufshpb_alloc_region_tbl(hba, hpb);
	if (ret)
		goto release_pre_req_mempool;

	ufshpb_stat_init(hpb);
	ufshpb_param_init(hpb);

	if (hpb->is_hcm) {
		poll = hpb->params.timeout_polling_interval_ms;
		schedule_delayed_work(&hpb->ufshpb_read_to_work,
				      msecs_to_jiffies(poll));
	}

release_pre_req_mempool:
	ufshpb_pre_req_mempool_destroy(hpb);
release_m_page_cache:
	kmem_cache_destroy(hpb->m_page_cache);
release_req_cache:
	kmem_cache_destroy(hpb->map_req_cache);
static struct ufshpb_lu *
ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
		    struct ufshpb_dev_info *hpb_dev_info,
		    struct ufshpb_lu_info *hpb_lu_info)
	struct ufshpb_lu *hpb;

	hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
	if (!hpb)
		return NULL;

	hpb->lun = sdev->lun;
	hpb->sdev_ufs_lu = sdev;

	ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);

	ret = ufshpb_lu_hpb_init(hba, hpb);
	if (ret) {
		dev_err(hba->dev, "hpb lu init failed. ret %d", ret);

	sdev->hostdata = hpb;
	return hpb;
static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
	struct ufshpb_region *rgn, *next_rgn;
	struct ufshpb_subregion *srgn, *next_srgn;
	unsigned long flags;

	/*
	 * If a device reset occurred, the remaining HPB region information
	 * may be stale. Therefore, discard the HPB response lists that
	 * remain after the reset to prevent unnecessary work.
	 */
	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
				 list_inact_rgn)
		list_del_init(&rgn->list_inact_rgn);

	list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
				 list_act_srgn)
		list_del_init(&srgn->list_act_srgn);
	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);

static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
		cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
		cancel_work_sync(&hpb->ufshpb_normalization_work);

	cancel_work_sync(&hpb->map_work);
static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
	bool flag_res = true;

	/* wait for the device to complete HPB reset query */
	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
		dev_dbg(hba->dev,
			"%s start flag reset polling %d times\n",
			__func__, try);

		/* Poll fHpbReset flag to be cleared */
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);

		if (err)
			dev_err(hba->dev,
				"%s reading fHpbReset flag failed with error %d\n",
				__func__, err);

		usleep_range(1000, 1100);
	}

	if (flag_res)
		dev_err(hba->dev,
			"%s fHpbReset was not cleared by the device\n",
			__func__);

/**
 * ufshpb_toggle_state - switch HPB state of all LUs
 * @hba: per-adapter instance
 * @src: expected current HPB state
 * @dest: target HPB state to switch to
 */
void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest)
{
	struct ufshpb_lu *hpb;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);

		if (!hpb || ufshpb_get_state(hpb) != src)
			continue;
		ufshpb_set_state(hpb, dest);

		if (dest == HPB_RESET) {
			ufshpb_cancel_jobs(hpb);
			ufshpb_discard_rsp_lists(hpb);
		}
	}
}

void ufshpb_suspend(struct ufs_hba *hba)
{
	struct ufshpb_lu *hpb;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);
		if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT)
			continue;

		ufshpb_set_state(hpb, HPB_SUSPEND);
		ufshpb_cancel_jobs(hpb);
	}
}

void ufshpb_resume(struct ufs_hba *hba)
{
	struct ufshpb_lu *hpb;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);
		if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND)
			continue;

		ufshpb_set_state(hpb, HPB_PRESENT);
		ufshpb_kick_map_work(hpb);
		if (hpb->is_hcm) {
			unsigned int poll = hpb->params.timeout_polling_interval_ms;

			schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll));
		}
	}
}
static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
			      struct ufshpb_lu_info *hpb_lu_info)
	u16 max_active_rgns;
	char desc_buf[QUERY_DESC_MAX_SIZE];

	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    QUERY_DESC_IDN_UNIT, lun, 0,
					    desc_buf, &size);
	ufshcd_rpm_put_sync(hba);

	if (ret) {
		dev_err(hba->dev,
			"%s: idn: %d lun: %d query request failed",
			__func__, QUERY_DESC_IDN_UNIT, lun);

	lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
	if (lu_enable != LU_ENABLED_HPB_FUNC)
		return -ENODEV;

	max_active_rgns = get_unaligned_be16(
			desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
	if (!max_active_rgns) {
		dev_err(hba->dev,
			"lun %d wrong number of max active regions\n", lun);

	hpb_lu_info->num_blocks = get_unaligned_be64(
			desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
	hpb_lu_info->pinned_start = get_unaligned_be16(
			desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
	hpb_lu_info->num_pinned = get_unaligned_be16(
			desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
	hpb_lu_info->max_active_rgns = max_active_rgns;
void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (!hpb)
		return;

	ufshpb_set_state(hpb, HPB_FAILED);

	sdev = hpb->sdev_ufs_lu;
	sdev->hostdata = NULL;

	ufshpb_cancel_jobs(hpb);

	ufshpb_pre_req_mempool_destroy(hpb);
	ufshpb_destroy_region_tbl(hpb);

	kmem_cache_destroy(hpb->map_req_cache);
	kmem_cache_destroy(hpb->m_page_cache);

	list_del_init(&hpb->list_hpb_lu);

	kfree(hpb);
}
static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
	struct ufshpb_lu *hpb;
	struct scsi_device *sdev;

	if (tot_active_srgn_pages == 0) {
		ufshpb_remove(hba);
		return;
	}

	init_success = !ufshpb_check_hpb_reset_query(hba);

	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
	if (pool_size > tot_active_srgn_pages) {
		mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
		mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
	}

	shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);
		if (!hpb)
			continue;

		if (init_success) {
			ufshpb_set_state(hpb, HPB_PRESENT);
			if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
				queue_work(ufshpb_wq, &hpb->map_work);
		} else {
			dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
			ufshpb_destroy_lu(hba, sdev);
		}
	}
void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct ufshpb_lu *hpb;
	int ret;
	struct ufshpb_lu_info hpb_lu_info = { 0 };
	int lun = sdev->lun;

	if (lun >= hba->dev_info.max_lu_supported)
		goto out;

	ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
	if (ret)
		goto out;

	hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
				  &hpb_lu_info);
	if (!hpb)
		goto out;

	tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
			hpb->srgns_per_rgn * hpb->pages_per_srgn;

out:
	/* All LUs are initialized */
	if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
		ufshpb_hpb_lu_prepared(hba);
}
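/*
 * Note (derived from the code above and ufshpb_init() below):
 * slave_conf_cnt is initialized to the number of LUs, so the last LU to
 * finish probing drops it to zero and ufshpb_hpb_lu_prepared() runs
 * exactly once.
 */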
static int ufshpb_init_mem_wq(struct ufs_hba *hba)
{
	int ret;
	unsigned int pool_size;

	ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
					      sizeof(struct ufshpb_map_ctx),
					      0, 0, NULL);
	if (!ufshpb_mctx_cache) {
		dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
		return -ENOMEM;
	}

	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
	dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
		 __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);

	ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
						    ufshpb_mctx_cache);
	if (!ufshpb_mctx_pool) {
		dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
		ret = -ENOMEM;
		goto release_mctx_cache;
	}

	ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
	if (!ufshpb_page_pool) {
		dev_err(hba->dev, "ufshpb: cannot init page pool\n");
		ret = -ENOMEM;
		goto release_mctx_pool;
	}

	ufshpb_wq = alloc_workqueue("ufshpb-wq",
				    WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!ufshpb_wq) {
		dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
		ret = -ENOMEM;
		goto release_page_pool;
	}

	return 0;

release_page_pool:
	mempool_destroy(ufshpb_page_pool);
release_mctx_pool:
	mempool_destroy(ufshpb_mctx_pool);
release_mctx_cache:
	kmem_cache_destroy(ufshpb_mctx_cache);
	return ret;
}
void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
{
	struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
	int max_active_rgns = 0;
	int hpb_num_lu;

	hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
	if (hpb_num_lu == 0) {
		dev_err(hba->dev, "No HPB LU supported\n");
		hpb_info->hpb_disabled = true;
		return;
	}

	hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
	hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
	max_active_rgns = get_unaligned_be16(geo_buf +
			GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);

	if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
	    max_active_rgns == 0) {
		dev_err(hba->dev, "No HPB supported device\n");
		hpb_info->hpb_disabled = true;
		return;
	}
}
void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
	int version, ret;
	u32 max_single_cmd;

	hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];

	version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
	if ((version != HPB_SUPPORT_VERSION) &&
	    (version != HPB_SUPPORT_LEGACY_VERSION)) {
		dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
			__func__, version);
		hpb_dev_info->hpb_disabled = true;
		return;
	}

	if (version == HPB_SUPPORT_LEGACY_VERSION)
		hpb_dev_info->is_legacy = true;

	/*
	 * Get the number of user logical units to check whether all
	 * scsi_devices have finished initialization.
	 */
	hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];

	if (hpb_dev_info->is_legacy)
		return;

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);

	if (ret)
		hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
	else
		hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
}
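/*
 * Note (an interpretation of the code above, not stated in the source):
 * the +1 suggests the bMAX_DATA_SIZE_FOR_HPB_SINGLE_CMD attribute encodes
 * the maximum single-command chunk minus one; the driver adds 1 and clamps
 * the result to HPB_MULTI_CHUNK_HIGH, falling back to the legacy chunk
 * limit when the attribute cannot be read.
 */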
void ufshpb_init(struct ufs_hba *hba)
{
	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
	int try;
	int ret;

	if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
		return;

	if (ufshpb_init_mem_wq(hba)) {
		hpb_dev_info->hpb_disabled = true;
		return;
	}

	atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
	tot_active_srgn_pages = 0;
	/* issue HPB reset query */
	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
		ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
					QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
		if (!ret)
			break;
	}
}

void ufshpb_remove(struct ufs_hba *hba)
{
	mempool_destroy(ufshpb_page_pool);
	mempool_destroy(ufshpb_mctx_pool);
	kmem_cache_destroy(ufshpb_mctx_cache);

	destroy_workqueue(ufshpb_wq);
}
module_param(ufshpb_host_map_kbytes, uint, 0644);
MODULE_PARM_DESC(ufshpb_host_map_kbytes,
	"ufshpb host mapping memory in kilobytes for the ufshpb memory pool");