Merge patch series "scsi: ufs: core: Always read the descriptors with max length"
author     Martin K. Petersen <martin.petersen@oracle.com>    Fri, 30 Dec 2022 21:48:47 +0000 (21:48 +0000)
committer  Martin K. Petersen <martin.petersen@oracle.com>    Fri, 30 Dec 2022 21:48:47 +0000 (21:48 +0000)
Arthur Simchaev <Arthur.Simchaev@wdc.com> says:

Read any descriptor with a maximum size of QUERY_DESC_MAX_SIZE.
According to the spec, the device returns the actual descriptor size.
This improves code readability and saves CPU cycles.  While at it,
clean up a few leftovers around the descriptor size parameter.

Link: https://lore.kernel.org/r/1670763911-8695-1-git-send-email-Arthur.Simchaev@wdc.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
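
For illustration, here is a minimal sketch of the read pattern this series
converges on (based on the ufshcd query helpers visible in the diff below;
not the literal driver code): callers hand the query layer a buffer of
QUERY_DESC_MAX_SIZE bytes, and the actual descriptor length is then taken
from the bLength byte the device returns in the descriptor header.

        u8 desc_buf[QUERY_DESC_MAX_SIZE];
        int buff_len = QUERY_DESC_MAX_SIZE;
        int ret;

        /* Always request the full descriptor... */
        ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
                                            QUERY_DESC_IDN_DEVICE, 0, 0,
                                            desc_buf, &buff_len);
        /* ...and trust the size the device reports in its header. */
        if (!ret)
                buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
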
drivers/ufs/core/ufs_bsg.c
drivers/ufs/core/ufshcd.c
drivers/ufs/core/ufshpb.c
include/ufs/ufs.h
include/ufs/ufshcd.h

index a8e58faa7da2f1a8fbfa3931dd18d09e85311ed8,dc441ac8e2c4561c88218dffe5d682c4b7d7e451..0044029bcf7bd652eb6e9551985a7d106f969722
@@@ -6,7 -6,6 +6,7 @@@
   */
  
  #include <linux/bsg-lib.h>
 +#include <linux/dma-mapping.h>
  #include <scsi/scsi.h>
  #include <scsi/scsi_host.h>
  #include "ufs_bsg.h"
@@@ -17,20 -16,30 +17,15 @@@ static int ufs_bsg_get_query_desc_size(
                                       struct utp_upiu_query *qr)
  {
        int desc_size = be16_to_cpu(qr->length);
-       int desc_id = qr->idn;
  
        if (desc_size <= 0)
                return -EINVAL;
  
-       ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
-       if (!*desc_len)
-               return -EINVAL;
-       *desc_len = min_t(int, *desc_len, desc_size);
+       *desc_len = min_t(int, QUERY_DESC_MAX_SIZE, desc_size);
  
        return 0;
  }
  
 -static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
 -                                   unsigned int request_len,
 -                                   unsigned int reply_len)
 -{
 -      int min_req_len = sizeof(struct ufs_bsg_request);
 -      int min_rsp_len = sizeof(struct ufs_bsg_reply);
 -
 -      if (min_req_len > request_len || min_rsp_len > reply_len) {
 -              dev_err(hba->dev, "not enough space assigned\n");
 -              return -EINVAL;
 -      }
 -
 -      return 0;
 -}
 -
  static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
                                     uint8_t **desc_buff, int *desc_len,
                                     enum query_opcode desc_op)
@@@ -69,84 -78,23 +64,84 @@@ out
        return 0;
  }
  
 +static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
 +{
 +      struct ufs_rpmb_request *rpmb_request = job->request;
 +      struct ufs_rpmb_reply *rpmb_reply = job->reply;
 +      struct bsg_buffer *payload = NULL;
 +      enum dma_data_direction dir;
 +      struct scatterlist *sg_list;
 +      int rpmb_req_type;
 +      int sg_cnt;
 +      int ret;
 +      int data_len;
 +
 +      if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
 +          !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
 +              return -EINVAL;
 +
 +      if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
 +              return -EINVAL;
 +
 +      rpmb_req_type = be16_to_cpu(rpmb_request->ehs_req.meta.req_resp_type);
 +
 +      switch (rpmb_req_type) {
 +      case UFS_RPMB_WRITE_KEY:
 +      case UFS_RPMB_READ_CNT:
 +      case UFS_RPMB_PURGE_ENABLE:
 +              dir = DMA_NONE;
 +              break;
 +      case UFS_RPMB_WRITE:
 +      case UFS_RPMB_SEC_CONF_WRITE:
 +              dir = DMA_TO_DEVICE;
 +              break;
 +      case UFS_RPMB_READ:
 +      case UFS_RPMB_SEC_CONF_READ:
 +      case UFS_RPMB_PURGE_STATUS_READ:
 +              dir = DMA_FROM_DEVICE;
 +              break;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      if (dir != DMA_NONE) {
 +              payload = &job->request_payload;
 +              if (!payload || !payload->payload_len || !payload->sg_cnt)
 +                      return -EINVAL;
 +
 +              sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
 +              if (unlikely(!sg_cnt))
 +                      return -ENOMEM;
 +              sg_list = payload->sg_list;
 +              data_len = payload->payload_len;
 +      }
 +
 +      ret = ufshcd_advanced_rpmb_req_handler(hba, &rpmb_request->bsg_request.upiu_req,
 +                                 &rpmb_reply->bsg_reply.upiu_rsp, &rpmb_request->ehs_req,
 +                                 &rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir);
 +
 +      if (dir != DMA_NONE) {
 +              dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
 +
 +              if (!ret)
 +                      rpmb_reply->bsg_reply.reply_payload_rcv_len = data_len;
 +      }
 +
 +      return ret;
 +}
 +
  static int ufs_bsg_request(struct bsg_job *job)
  {
        struct ufs_bsg_request *bsg_request = job->request;
        struct ufs_bsg_reply *bsg_reply = job->reply;
        struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
 -      unsigned int req_len = job->request_len;
 -      unsigned int reply_len = job->reply_len;
        struct uic_command uc = {};
        int msgcode;
 -      uint8_t *desc_buff = NULL;
 +      uint8_t *buff = NULL;
        int desc_len = 0;
        enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
        int ret;
 -
 -      ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
 -      if (ret)
 -              goto out;
 +      bool rpmb = false;
  
        bsg_reply->reply_payload_rcv_len = 0;
  
        switch (msgcode) {
        case UPIU_TRANSACTION_QUERY_REQ:
                desc_op = bsg_request->upiu_req.qr.opcode;
 -              ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
 -                                              &desc_len, desc_op);
 -              if (ret) {
 -                      ufshcd_rpm_put_sync(hba);
 +              ret = ufs_bsg_alloc_desc_buffer(hba, job, &buff, &desc_len, desc_op);
 +              if (ret)
                        goto out;
 -              }
 -
                fallthrough;
        case UPIU_TRANSACTION_NOP_OUT:
        case UPIU_TRANSACTION_TASK_REQ:
                ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
                                               &bsg_reply->upiu_rsp, msgcode,
 -                                             desc_buff, &desc_len, desc_op);
 +                                             buff, &desc_len, desc_op);
                if (ret)
 -                      dev_err(hba->dev,
 -                              "exe raw upiu: error code %d\n", ret);
 -
 +                      dev_err(hba->dev, "exe raw upiu: error code %d\n", ret);
 +              else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) {
 +                      bsg_reply->reply_payload_rcv_len =
 +                              sg_copy_from_buffer(job->request_payload.sg_list,
 +                                                  job->request_payload.sg_cnt,
 +                                                  buff, desc_len);
 +              }
                break;
        case UPIU_TRANSACTION_UIC_CMD:
                memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
                ret = ufshcd_send_uic_cmd(hba, &uc);
                if (ret)
 -                      dev_err(hba->dev,
 -                              "send uic cmd: error code %d\n", ret);
 +                      dev_err(hba->dev, "send uic cmd: error code %d\n", ret);
  
                memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
  
                break;
 +      case UPIU_TRANSACTION_ARPMB_CMD:
 +              rpmb = true;
 +              ret = ufs_bsg_exec_advanced_rpmb_req(hba, job);
 +              if (ret)
 +                      dev_err(hba->dev, "ARPMB OP failed: error code  %d\n", ret);
 +              break;
        default:
                ret = -ENOTSUPP;
                dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
                break;
        }
  
 -      ufshcd_rpm_put_sync(hba);
 -
 -      if (!desc_buff)
 -              goto out;
 -
 -      if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
 -              bsg_reply->reply_payload_rcv_len =
 -                      sg_copy_from_buffer(job->request_payload.sg_list,
 -                                          job->request_payload.sg_cnt,
 -                                          desc_buff, desc_len);
 -
 -      kfree(desc_buff);
 -
  out:
 +      ufshcd_rpm_put_sync(hba);
 +      kfree(buff);
        bsg_reply->result = ret;
 -      job->reply_len = sizeof(struct ufs_bsg_reply);
 +      job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
        /* complete the job here only if no error */
        if (ret == 0)
                bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
index 1fe16aebc1391106aa1d6aa2dcebc7b4a1d6a749,7f896268dffc0f24e9ed8e88b8ae36f9f1c85593..99ca5b03502853df85fb6c02b9c4f63f13849b6f
@@@ -56,9 -56,6 +56,9 @@@
  /* Query request timeout */
  #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
  
 +/* Advanced RPMB request timeout */
 +#define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */
 +
  /* Task management command timeout */
  #define TM_CMD_TIMEOUT        100 /* msecs */
  
@@@ -531,7 -528,7 +531,7 @@@ void ufshcd_print_trs(struct ufs_hba *h
                prdt_length = le16_to_cpu(
                        lrbp->utr_descriptor_ptr->prd_table_length);
                if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
 -                      prdt_length /= sizeof(struct ufshcd_sg_entry);
 +                      prdt_length /= ufshcd_sg_entry_size(hba);
  
                dev_err(hba->dev,
                        "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
  
                if (pr_prdt)
                        ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
 -                              sizeof(struct ufshcd_sg_entry) * prdt_length);
 +                              ufshcd_sg_entry_size(hba) * prdt_length);
        }
  }
  
@@@ -778,7 -775,7 +778,7 @@@ static inline void ufshcd_utrl_clear(st
  }
  
  /**
 - * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
 + * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
   * @hba: per adapter instance
   * @pos: position of the bit to be cleared
   */
@@@ -1124,12 -1121,6 +1124,12 @@@ static u32 ufshcd_pending_cmds(struct u
        return pending;
  }
  
 +/*
 + * Wait until all pending SCSI commands and TMFs have finished or the timeout
 + * has expired.
 + *
 + * Return: 0 upon success; -EBUSY upon timeout.
 + */
  static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
                                        u64 wait_timeout_us)
  {
                }
  
                spin_unlock_irqrestore(hba->host->host_lock, flags);
 -              schedule();
 +              io_schedule_timeout(msecs_to_jiffies(20));
                if (ktime_to_us(ktime_sub(ktime_get(), start)) >
                    wait_timeout_us) {
                        timeout = true;
@@@ -1234,14 -1225,9 +1234,14 @@@ static int ufshcd_scale_gear(struct ufs
        return ret;
  }
  
 -static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 +/*
 + * Wait until all pending SCSI commands and TMFs have finished or the timeout
 + * has expired.
 + *
 + * Return: 0 upon success; -EBUSY upon timeout.
 + */
 +static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
  {
 -      #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
        int ret = 0;
        /*
         * make sure that there are no outstanding requests when
        down_write(&hba->clk_scaling_lock);
  
        if (!hba->clk_scaling.is_allowed ||
 -          ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 +          ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
                ret = -EBUSY;
                up_write(&hba->clk_scaling_lock);
                ufshcd_scsi_unblock_requests(hba);
@@@ -1289,7 -1275,7 +1289,7 @@@ static int ufshcd_devfreq_scale(struct 
        int ret = 0;
        bool is_writelock = true;
  
 -      ret = ufshcd_clock_scaling_prepare(hba);
 +      ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
        if (ret)
                return ret;
  
@@@ -2413,30 -2399,38 +2413,30 @@@ int ufshcd_send_uic_cmd(struct ufs_hba 
  }
  
  /**
 - * ufshcd_map_sg - Map scatter-gather list to prdt
 - * @hba: per adapter instance
 - * @lrbp: pointer to local reference block
 - *
 - * Returns 0 in case of success, non-zero value in case of failure
 + * ufshcd_sgl_to_prdt - SG list to PRTD (Physical Region Description Table, 4DW format)
 + * @hba:      per-adapter instance
 + * @lrbp:     pointer to local reference block
 + * @sg_entries:       The number of sg lists actually used
 + * @sg_list:  Pointer to SG list
   */
 -static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 +static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
 +                             struct scatterlist *sg_list)
  {
 -      struct ufshcd_sg_entry *prd_table;
 +      struct ufshcd_sg_entry *prd;
        struct scatterlist *sg;
 -      struct scsi_cmnd *cmd;
 -      int sg_segments;
        int i;
  
 -      cmd = lrbp->cmd;
 -      sg_segments = scsi_dma_map(cmd);
 -      if (sg_segments < 0)
 -              return sg_segments;
 -
 -      if (sg_segments) {
 +      if (sg_entries) {
  
                if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
                        lrbp->utr_descriptor_ptr->prd_table_length =
 -                              cpu_to_le16((sg_segments *
 -                                      sizeof(struct ufshcd_sg_entry)));
 +                              cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
                else
 -                      lrbp->utr_descriptor_ptr->prd_table_length =
 -                              cpu_to_le16(sg_segments);
 +                      lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
  
 -              prd_table = lrbp->ucd_prdt_ptr;
 +              prd = lrbp->ucd_prdt_ptr;
  
 -              scsi_for_each_sg(cmd, sg, sg_segments, i) {
 +              for_each_sg(sg_list, sg, sg_entries, i) {
                        const unsigned int len = sg_dma_len(sg);
  
                        /*
                         * indicates 4 bytes, '7' indicates 8 bytes, etc."
                         */
                        WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
 -                      prd_table[i].size = cpu_to_le32(len - 1);
 -                      prd_table[i].addr = cpu_to_le64(sg->dma_address);
 -                      prd_table[i].reserved = 0;
 +                      prd->size = cpu_to_le32(len - 1);
 +                      prd->addr = cpu_to_le64(sg->dma_address);
 +                      prd->reserved = 0;
 +                      prd = (void *)prd + ufshcd_sg_entry_size(hba);
                }
        } else {
                lrbp->utr_descriptor_ptr->prd_table_length = 0;
        }
 +}
 +
 +/**
 + * ufshcd_map_sg - Map scatter-gather list to prdt
 + * @hba: per adapter instance
 + * @lrbp: pointer to local reference block
 + *
 + * Returns 0 in case of success, non-zero value in case of failure
 + */
 +static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 +{
 +      struct scsi_cmnd *cmd = lrbp->cmd;
 +      int sg_segments = scsi_dma_map(cmd);
 +
 +      if (sg_segments < 0)
 +              return sg_segments;
 +
 +      ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
  
        return 0;
  }
@@@ -2521,15 -2496,14 +2521,15 @@@ static void ufshcd_disable_intr(struct 
  }
  
  /**
 - * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 + * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request
   * descriptor according to request
   * @lrbp: pointer to local reference block
   * @upiu_flags: flags required in the header
   * @cmd_dir: requests data direction
 + * @ehs_length: Total EHS Length (in 32‐bytes units of all Extra Header Segments)
   */
 -static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
 -                      u8 *upiu_flags, enum dma_data_direction cmd_dir)
 +static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
 +                                      enum dma_data_direction cmd_dir, int ehs_length)
  {
        struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
        u32 data_direction;
                *upiu_flags = UPIU_CMD_FLAGS_NONE;
        }
  
 -      dword_0 = data_direction | (lrbp->command_type
 -                              << UPIU_COMMAND_TYPE_OFFSET);
 +      dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
 +              ehs_length << 8;
        if (lrbp->intr_cmd)
                dword_0 |= UTP_REQ_DESC_INT_CMD;
  
@@@ -2604,7 -2578,8 +2604,7 @@@ void ufshcd_prepare_utp_scsi_cmd_upiu(s
  }
  
  /**
 - * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 - * for query requsts
 + * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
   * @hba: UFS hba
   * @lrbp: local reference block pointer
   * @upiu_flags: flags
@@@ -2675,7 -2650,7 +2675,7 @@@ static int ufshcd_compose_devman_upiu(s
        else
                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
  
 -      ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
 +      ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
        if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
                ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
        else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
@@@ -2703,7 -2678,8 +2703,7 @@@ static int ufshcd_comp_scsi_upiu(struc
                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
  
        if (likely(lrbp->cmd)) {
 -              ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
 -                                              lrbp->cmd->sc_data_direction);
 +              ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
                ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
        } else {
                ret = -EINVAL;
@@@ -2758,11 -2734,10 +2758,11 @@@ static void ufshcd_map_queues(struct Sc
  
  static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
  {
 -      struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
 +      struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
 +              i * sizeof_utp_transfer_cmd_desc(hba);
        struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
        dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
 -              i * sizeof(struct utp_transfer_cmd_desc);
 +              i * sizeof_utp_transfer_cmd_desc(hba);
        u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
                                       response_upiu);
        u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
        lrb->utr_descriptor_ptr = utrdlp + i;
        lrb->utrd_dma_addr = hba->utrdl_dma_addr +
                i * sizeof(struct utp_transfer_req_desc);
 -      lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
 +      lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
        lrb->ucd_req_dma_addr = cmd_desc_element_addr;
 -      lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
 +      lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
        lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
 -      lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
 +      lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
        lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
  }
  
@@@ -2970,12 -2945,6 +2970,12 @@@ ufshcd_dev_cmd_completion(struct ufs_hb
                dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
                                __func__);
                break;
 +      case UPIU_TRANSACTION_RESPONSE:
 +              if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
 +                      err = -EINVAL;
 +                      dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
 +              }
 +              break;
        default:
                err = -EINVAL;
                dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
@@@ -3131,7 -3100,7 +3131,7 @@@ static int ufshcd_query_flag_retry(stru
  
        if (ret)
                dev_err(hba->dev,
 -                      "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
 +                      "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
                        __func__, opcode, idn, ret, retries);
        return ret;
  }
@@@ -3399,37 -3368,6 +3399,6 @@@ int ufshcd_query_descriptor_retry(struc
        return err;
  }
  
- /**
-  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
-  * @hba: Pointer to adapter instance
-  * @desc_id: descriptor idn value
-  * @desc_len: mapped desc length (out)
-  */
- void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
-                                 int *desc_len)
- {
-       if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
-           desc_id == QUERY_DESC_IDN_RFU_1)
-               *desc_len = 0;
-       else
-               *desc_len = hba->desc_size[desc_id];
- }
- EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
- static void ufshcd_update_desc_length(struct ufs_hba *hba,
-                                     enum desc_idn desc_id, int desc_index,
-                                     unsigned char desc_len)
- {
-       if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
-           desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
-               /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
-                * than the RPMB unit, however, both descriptors share the same
-                * desc_idn, to cover both unit descriptors with one length, we
-                * choose the normal unit descriptor length by desc_index.
-                */
-               hba->desc_size[desc_id] = desc_len;
- }
  /**
   * ufshcd_read_desc_param - read the specified descriptor parameter
   * @hba: Pointer to adapter instance
@@@ -3450,26 -3388,13 +3419,13 @@@ int ufshcd_read_desc_param(struct ufs_h
  {
        int ret;
        u8 *desc_buf;
-       int buff_len;
+       int buff_len = QUERY_DESC_MAX_SIZE;
        bool is_kmalloc = true;
  
        /* Safety check */
        if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
                return -EINVAL;
  
-       /* Get the length of descriptor */
-       ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
-       if (!buff_len) {
-               dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
-               return -EINVAL;
-       }
-       if (param_offset >= buff_len) {
-               dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
-                       __func__, param_offset, desc_id, buff_len);
-               return -EINVAL;
-       }
        /* Check whether we need temp memory */
        if (param_offset != 0 || param_size < buff_len) {
                desc_buf = kzalloc(buff_len, GFP_KERNEL);
  
        /* Request for full descriptor */
        ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
-                                       desc_id, desc_index, 0,
-                                       desc_buf, &buff_len);
+                                           desc_id, desc_index, 0,
+                                           desc_buf, &buff_len);
        if (ret) {
                dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
                        __func__, desc_id, desc_index, param_offset, ret);
                goto out;
        }
  
+       /* Update descriptor length */
+       buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
+       if (param_offset >= buff_len) {
+               dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
+                       __func__, param_offset, desc_id, buff_len);
+               return -EINVAL;
+       }
        /* Sanity check */
        if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
                dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
                goto out;
        }
  
-       /* Update descriptor length */
-       buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
-       ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
        if (is_kmalloc) {
                /* Make sure we don't copy more data than available */
                if (param_offset >= buff_len)
@@@ -3689,7 -3618,7 +3649,7 @@@ static int ufshcd_memory_alloc(struct u
        size_t utmrdl_size, utrdl_size, ucdl_size;
  
        /* Allocate memory for UTP command descriptors */
 -      ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
 +      ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
        hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
                                                  ucdl_size,
                                                  &hba->ucdl_dma_addr,
@@@ -3783,7 -3712,7 +3743,7 @@@ static void ufshcd_host_memory_configur
        prdt_offset =
                offsetof(struct utp_transfer_cmd_desc, prd_table);
  
 -      cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
 +      cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
        cmd_desc_dma_addr = hba->ucdl_dma_addr;
  
        for (i = 0; i < hba->nutrs; i++) {
@@@ -4940,7 -4869,7 +4900,7 @@@ static void ufshcd_setup_links(struct u
   */
  static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
  {
-       int len = hba->desc_size[QUERY_DESC_IDN_UNIT];
+       int len = QUERY_DESC_MAX_SIZE;
        u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
        u8 lun_qdepth = hba->nutrs;
        u8 *desc_buf;
            desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
                hba->dev_info.is_lu_power_on_wp = true;
  
 +      /* In case of RPMB LU, check if advanced RPMB mode is enabled */
 +      if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
 +          desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
 +              hba->dev_info.b_advanced_rpmb_en = true;
 +
 +
        kfree(desc_buf);
  set_qdepth:
        /*
@@@ -6905,7 -6828,7 +6865,7 @@@ static int ufshcd_issue_devman_upiu_cmd
        /* update the task tag in the request upiu */
        req_upiu->header.dword_0 |= cpu_to_be32(tag);
  
 -      ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
 +      ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
  
        /* just copy the upiu request as it is */
        memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
@@@ -7028,100 -6951,6 +6988,100 @@@ int ufshcd_exec_raw_upiu_cmd(struct ufs
        return err;
  }
  
 +/**
 + * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
 + * @hba:      per adapter instance
 + * @req_upiu: upiu request
 + * @rsp_upiu: upiu reply
 + * @req_ehs:  EHS field which contains Advanced RPMB Request Message
 + * @rsp_ehs:  EHS field which returns Advanced RPMB Response Message
 + * @sg_cnt:   The number of sg lists actually used
 + * @sg_list:  Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
 + * @dir:      DMA direction
 + *
 + * Returns zero on success, non-zero on failure
 + */
 +int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
 +                       struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
 +                       struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
 +                       enum dma_data_direction dir)
 +{
 +      DECLARE_COMPLETION_ONSTACK(wait);
 +      const u32 tag = hba->reserved_slot;
 +      struct ufshcd_lrb *lrbp;
 +      int err = 0;
 +      int result;
 +      u8 upiu_flags;
 +      u8 *ehs_data;
 +      u16 ehs_len;
 +
 +      /* Protects use of hba->reserved_slot. */
 +      ufshcd_hold(hba, false);
 +      mutex_lock(&hba->dev_cmd.lock);
 +      down_read(&hba->clk_scaling_lock);
 +
 +      lrbp = &hba->lrb[tag];
 +      WARN_ON(lrbp->cmd);
 +      lrbp->cmd = NULL;
 +      lrbp->task_tag = tag;
 +      lrbp->lun = UFS_UPIU_RPMB_WLUN;
 +
 +      lrbp->intr_cmd = true;
 +      ufshcd_prepare_lrbp_crypto(NULL, lrbp);
 +      hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
 +
 +      /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
 +      lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
 +
 +      ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
 +
 +      /* update the task tag and LUN in the request upiu */
 +      req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
 +
 +      /* copy the UPIU(contains CDB) request as it is */
 +      memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
 +      /* Copy EHS, starting with byte32, immediately after the CDB package */
 +      memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
 +
 +      if (dir != DMA_NONE && sg_list)
 +              ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
 +
 +      memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 +
 +      hba->dev_cmd.complete = &wait;
 +
 +      ufshcd_send_command(hba, tag);
 +
 +      err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
 +
 +      if (!err) {
 +              /* Just copy the upiu response as it is */
 +              memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
 +              /* Get the response UPIU result */
 +              result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
 +
 +              ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
 +              /*
 +               * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
 +               * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
 +               * Message is 02h
 +               */
 +              if (ehs_len == 2 && rsp_ehs) {
 +                      /*
 +                       * ucd_rsp_ptr points to a buffer with a length of 512 bytes
 +                       * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
 +                       */
 +                      ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
 +                      memcpy(rsp_ehs, ehs_data, ehs_len * 32);
 +              }
 +      }
 +
 +      up_read(&hba->clk_scaling_lock);
 +      mutex_unlock(&hba->dev_cmd.lock);
 +      ufshcd_release(hba);
 +      return err ? : result;
 +}
 +
  /**
   * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
   * @cmd: SCSI command pointer
@@@ -7564,12 -7393,11 +7524,11 @@@ static u32 ufshcd_get_max_icc_level(in
   * In case regulators are not initialized we'll return 0
   * @hba: per-adapter instance
   * @desc_buf: power descriptor buffer to extract ICC levels from.
-  * @len: length of desc_buff
   *
   * Returns calculated ICC level
   */
  static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
-                                               const u8 *desc_buf, int len)
+                                               const u8 *desc_buf)
  {
        u32 icc_level = 0;
  
  static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
  {
        int ret;
-       int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
        u8 *desc_buf;
        u32 icc_level;
  
-       desc_buf = kmalloc(buff_len, GFP_KERNEL);
+       desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
        if (!desc_buf)
                return;
  
        ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
-                                    desc_buf, buff_len);
+                                    desc_buf, QUERY_DESC_MAX_SIZE);
        if (ret) {
                dev_err(hba->dev,
-                       "%s: Failed reading power descriptor.len = %d ret = %d",
-                       __func__, buff_len, ret);
+                       "%s: Failed reading power descriptor ret = %d",
+                       __func__, ret);
                goto out;
        }
  
-       icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
-                                                        buff_len);
+       icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
        dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
  
        ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
@@@ -7739,10 -7565,6 +7696,6 @@@ static void ufshcd_wb_probe(struct ufs_
             (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
                goto wb_disabled;
  
-       if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
-           DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
-               goto wb_disabled;
        ext_ufs_feature = get_unaligned_be32(desc_buf +
                                        DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
  
@@@ -7850,14 -7672,14 +7803,14 @@@ static int ufs_get_device_desc(struct u
        u8 *desc_buf;
        struct ufs_dev_info *dev_info = &hba->dev_info;
  
-       desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
+       desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
        if (!desc_buf) {
                err = -ENOMEM;
                goto out;
        }
  
        err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
-                                    hba->desc_size[QUERY_DESC_IDN_DEVICE]);
+                                    QUERY_DESC_MAX_SIZE);
        if (err) {
                dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
                        __func__, err);
@@@ -8104,18 -7926,16 +8057,16 @@@ static void ufshcd_clear_dbg_ufs_stats(
  static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
  {
        int err;
-       size_t buff_len;
        u8 *desc_buf;
  
-       buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
-       desc_buf = kmalloc(buff_len, GFP_KERNEL);
+       desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
        if (!desc_buf) {
                err = -ENOMEM;
                goto out;
        }
  
        err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
-                                    desc_buf, buff_len);
+                                    desc_buf, QUERY_DESC_MAX_SIZE);
        if (err) {
                dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
                                __func__, err);
        else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
                hba->dev_info.max_lu_supported = 8;
  
-       if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
+       if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
                GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
                ufshpb_get_geo_info(hba, desc_buf);
  
@@@ -8212,11 -8032,7 +8163,7 @@@ out
  static int ufshcd_device_params_init(struct ufs_hba *hba)
  {
        bool flag;
-       int ret, i;
-        /* Init device descriptor sizes */
-       for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
-               hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
+       int ret;
  
        /* Init UFS geometry descriptor related parameters */
        ret = ufshcd_device_geo_params_init(hba);
@@@ -9711,7 -9527,6 +9658,7 @@@ void ufshcd_remove(struct ufs_hba *hba
        ufshpb_remove(hba);
        ufs_sysfs_remove_nodes(hba->dev);
        blk_mq_destroy_queue(hba->tmf_queue);
 +      blk_put_queue(hba->tmf_queue);
        blk_mq_free_tag_set(&hba->tmf_tag_set);
        scsi_remove_host(hba->host);
        /* disable interrupts */
@@@ -9779,7 -9594,6 +9726,7 @@@ int ufshcd_alloc_host(struct device *de
        hba->dev = dev;
        hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
        hba->nop_out_timeout = NOP_OUT_TIMEOUT;
 +      ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
        INIT_LIST_HEAD(&hba->clk_list_head);
        spin_lock_init(&hba->outstanding_lock);
  
@@@ -10009,7 -9823,6 +9956,7 @@@ int ufshcd_init(struct ufs_hba *hba, vo
  
  free_tmf_queue:
        blk_mq_destroy_queue(hba->tmf_queue);
 +      blk_put_queue(hba->tmf_queue);
  free_tmf_tag_set:
        blk_mq_free_tag_set(&hba->tmf_tag_set);
  out_remove_scsi_host:
@@@ -10159,6 -9972,11 +10106,6 @@@ static int __init ufshcd_core_init(void
  {
        int ret;
  
 -      /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
 -      static_assert(sizeof(struct utp_transfer_cmd_desc) ==
 -                    2 * ALIGNED_UPIU_SIZE +
 -                            SG_ALL * sizeof(struct ufshcd_sg_entry));
 -
        ufs_debugfs_init();
  
        ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
index 994f4ac9df5a51bada1444877eee7c9983db2ca8,19c9b5d1dcf8209e489336ba4c751534140afb7a..a46a7666c891b49a8ff7a50f20137367943e919a
@@@ -383,7 -383,7 +383,7 @@@ int ufshpb_prep(struct ufs_hba *hba, st
        rgn = hpb->rgn_tbl + rgn_idx;
        srgn = rgn->srgn_tbl + srgn_idx;
  
 -      /* If command type is WRITE or DISCARD, set bitmap as drity */
 +      /* If command type is WRITE or DISCARD, set bitmap as dirty */
        if (ufshpb_is_write_or_discard(cmd)) {
                ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
                                   transfer_len, true);
@@@ -616,7 -616,7 +616,7 @@@ static void ufshpb_activate_subregion(s
  static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
                                                   blk_status_t error)
  {
 -      struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
 +      struct ufshpb_req *umap_req = req->end_io_data;
  
        ufshpb_put_req(umap_req->hpb, umap_req);
        return RQ_END_IO_NONE;
  static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
                                                  blk_status_t error)
  {
 -      struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
 +      struct ufshpb_req *map_req = req->end_io_data;
        struct ufshpb_lu *hpb = map_req->hpb;
        struct ufshpb_subregion *srgn;
        unsigned long flags;
@@@ -2382,12 -2382,10 +2382,10 @@@ static int ufshpb_get_lu_info(struct uf
  {
        u16 max_active_rgns;
        u8 lu_enable;
-       int size;
+       int size = QUERY_DESC_MAX_SIZE;
        int ret;
        char desc_buf[QUERY_DESC_MAX_SIZE];
  
-       ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
        ufshcd_rpm_get_sync(hba);
        ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
                                            QUERY_DESC_IDN_UNIT, lun, 0,
diff --combined include/ufs/ufs.h
index 0c112195b288d24c399662553160f4e519c70d1a,2fc71075c5a1e09b779eb7e3139787824a6c27e1..c146de52918d3a768d7f5ca2c0ffaac43a09002b
@@@ -38,7 -38,6 +38,6 @@@
  #define UFS_UPIU_MAX_UNIT_NUM_ID      0x7F
  #define UFS_MAX_LUNS          (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
  #define UFS_UPIU_WLUN_ID      (1 << 7)
- #define UFS_RPMB_UNIT         0xC4
  
  /* WriteBooster buffer is available only for the logical unit from 0 to 7 */
  #define UFS_UPIU_MAX_WB_LUN_ID        8
   */
  #define UFS_WB_EXCEED_LIFETIME                0x0B
  
 +/*
 + * In UFS Spec, the Extra Header Segment (EHS) starts from byte 32 in UPIU request/response packet
 + */
 +#define EHS_OFFSET_IN_RESPONSE 32
 +
  /* Well known logical unit id in LUN field of UPIU */
  enum {
        UFS_UPIU_REPORT_LUNS_WLUN       = 0x81,
@@@ -217,28 -211,6 +216,28 @@@ enum unit_desc_param 
        UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS      = 0x29,
  };
  
 +/* RPMB Unit descriptor parameters offsets in bytes*/
 +enum rpmb_unit_desc_param {
 +      RPMB_UNIT_DESC_PARAM_LEN                = 0x0,
 +      RPMB_UNIT_DESC_PARAM_TYPE               = 0x1,
 +      RPMB_UNIT_DESC_PARAM_UNIT_INDEX         = 0x2,
 +      RPMB_UNIT_DESC_PARAM_LU_ENABLE          = 0x3,
 +      RPMB_UNIT_DESC_PARAM_BOOT_LUN_ID        = 0x4,
 +      RPMB_UNIT_DESC_PARAM_LU_WR_PROTECT      = 0x5,
 +      RPMB_UNIT_DESC_PARAM_LU_Q_DEPTH         = 0x6,
 +      RPMB_UNIT_DESC_PARAM_PSA_SENSITIVE      = 0x7,
 +      RPMB_UNIT_DESC_PARAM_MEM_TYPE           = 0x8,
 +      RPMB_UNIT_DESC_PARAM_REGION_EN          = 0x9,
 +      RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE   = 0xA,
 +      RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT  = 0xB,
 +      RPMB_UNIT_DESC_PARAM_REGION0_SIZE       = 0x13,
 +      RPMB_UNIT_DESC_PARAM_REGION1_SIZE       = 0x14,
 +      RPMB_UNIT_DESC_PARAM_REGION2_SIZE       = 0x15,
 +      RPMB_UNIT_DESC_PARAM_REGION3_SIZE       = 0x16,
 +      RPMB_UNIT_DESC_PARAM_PROVISIONING_TYPE  = 0x17,
 +      RPMB_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT   = 0x18,
 +};
 +
  /* Device descriptor parameters offsets in bytes*/
  enum device_desc_param {
        DEVICE_DESC_PARAM_LEN                   = 0x0,
@@@ -628,8 -600,6 +627,8 @@@ struct ufs_dev_info 
  
        bool    b_rpm_dev_flush_capable;
        u8      b_presrv_uspc_en;
 +
 +      bool    b_advanced_rpmb_en;
  };
  
  /*
diff --combined include/ufs/ufshcd.h
index acdfa72d7230bfd0dd513d58b2a5d29bc14e56ff,830ababe993214ff48267f4c83433734ddd4b70f..dd5912b4db77c3d642192e7748dcdde938efc5a9
@@@ -30,7 -30,6 +30,7 @@@ struct ufs_hba
  enum dev_cmd_type {
        DEV_CMD_TYPE_NOP                = 0x0,
        DEV_CMD_TYPE_QUERY              = 0x1,
 +      DEV_CMD_TYPE_RPMB               = 0x2,
  };
  
  enum ufs_event_type {
@@@ -755,7 -754,6 +755,7 @@@ struct ufs_hba_monitor 
   * @vops: pointer to variant specific operations
   * @vps: pointer to variant specific parameters
   * @priv: pointer to variant specific private data
 + * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
   * @irq: Irq number of the controller
   * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
   * @dev_ref_clk_freq: reference clock frequency
@@@ -879,9 -877,6 +879,9 @@@ struct ufs_hba 
        const struct ufs_hba_variant_ops *vops;
        struct ufs_hba_variant_params *vps;
        void *priv;
 +#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
 +      size_t sg_entry_size;
 +#endif
        unsigned int irq;
        bool is_irq_enabled;
        enum ufs_ref_clk_freq dev_ref_clk_freq;
        bool is_urgent_bkops_lvl_checked;
  
        struct rw_semaphore clk_scaling_lock;
-       unsigned char desc_size[QUERY_DESC_IDN_MAX];
        atomic_t scsi_block_reqs_cnt;
  
        struct device           bsg_dev;
        bool complete_put;
  };
  
 +#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
 +static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
 +{
 +      return hba->sg_entry_size;
 +}
 +
 +static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size)
 +{
 +      WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry));
 +      hba->sg_entry_size = sg_entry_size;
 +}
 +#else
 +static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
 +{
 +      return sizeof(struct ufshcd_sg_entry);
 +}
 +
 +#define ufshcd_set_sg_entry_size(hba, sg_entry_size)                   \
 +      ({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
 +#endif
 +
 +static inline size_t sizeof_utp_transfer_cmd_desc(const struct ufs_hba *hba)
 +{
 +      return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
 +}
 +
  /* Returns true if clocks can be gated. Otherwise false */
  static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
  {
@@@ -1217,9 -1185,6 +1216,6 @@@ void ufshcd_release(struct ufs_hba *hba
  
  void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
  
- void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
-                                 int *desc_length);
  u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
  
  int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
@@@ -1232,10 -1197,7 +1228,10 @@@ int ufshcd_exec_raw_upiu_cmd(struct ufs
                             int msgcode,
                             u8 *desc_buff, int *buff_len,
                             enum query_opcode desc_op);
 -
 +int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
 +                                   struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
 +                                   struct ufs_ehs *ehs_rsp, int sg_cnt,
 +                                   struct scatterlist *sg_list, enum dma_data_direction dir);
  int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
  int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
  int ufshcd_suspend_prepare(struct device *dev);