Merge branch '5.20/scsi-queue' into 6.0/scsi-fixes
author Martin K. Petersen <martin.petersen@oracle.com>
Fri, 19 Aug 2022 21:28:54 +0000 (17:28 -0400)
committer Martin K. Petersen <martin.petersen@oracle.com>
Fri, 19 Aug 2022 21:28:54 +0000 (17:28 -0400)
Include the 5.20/scsi-queue commits that weren't submitted during the 6.0 merge window.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/scsi_lib.c

index a3e117a4b8e746981daa7315b716e6e0dc2c8a4e,6b3d54c04baa8c6fcc8821fab18739e1140a784f..f6c37a97544ea838ef8274f903f2feba83162f50
@@@ -3195,9 -3195,6 +3195,9 @@@ static int megasas_map_queues(struct Sc
        qoff += map->nr_queues;
        offset += map->nr_queues;
  
 +      /* we never use READ queue, so can't cheat blk-mq */
 +      shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;
 +
        /* Setup Poll hctx */
        map = &shost->tag_set.map[HCTX_TYPE_POLL];
        map->nr_queues = instance->iopoll_q_count;
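
For context, the hunk above relies on blk-mq's per-type queue maps: a host with
no dedicated read queues must report nr_queues = 0 for HCTX_TYPE_READ so that
blk-mq falls back to the default map rather than looking up an hctx in a read
map whose mapping was never set up. A minimal sketch of such a ->map_queues
callback, with hypothetical example_map_queues() and example_nr_poll_queues
names that are not part of this patch:

  #include <linux/blk-mq.h>
  #include <scsi/scsi_host.h>

  static unsigned int example_nr_poll_queues;   /* hypothetical driver knob */

  static int example_map_queues(struct Scsi_Host *shost)
  {
          struct blk_mq_queue_map *map;
          unsigned int offset = 0;

          /* All regular reads and writes go through the default map. */
          map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
          map->nr_queues = shost->nr_hw_queues - example_nr_poll_queues;
          map->queue_offset = 0;
          blk_mq_map_queues(map);
          offset += map->nr_queues;

          /* No dedicated read queues: blk-mq then reuses the default map. */
          shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;

          /* The remaining hardware queues are reserved for polled I/O. */
          map = &shost->tag_set.map[HCTX_TYPE_POLL];
          map->nr_queues = example_nr_poll_queues;
          map->queue_offset = offset;
          blk_mq_map_queues(map);

          return 0;
  }
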
@@@ -7153,22 -7150,18 +7153,18 @@@ static int megasas_alloc_ctrl_mem(struc
        switch (instance->adapter_type) {
        case MFI_SERIES:
                if (megasas_alloc_mfi_ctrl_mem(instance))
-                       goto fail;
+                       return -ENOMEM;
                break;
        case AERO_SERIES:
        case VENTURA_SERIES:
        case THUNDERBOLT_SERIES:
        case INVADER_SERIES:
                if (megasas_alloc_fusion_context(instance))
-                       goto fail;
+                       return -ENOMEM;
                break;
        }
  
        return 0;
-  fail:
-       kfree(instance->reply_map);
-       instance->reply_map = NULL;
-       return -ENOMEM;
  }
  
  /*
diff --combined drivers/scsi/scsi_lib.c
index 4dbd29ab1dcc37792688849a468e5b8e5dc72ede,7846610355906646d78a93320e334e8a90984386..ac2e70e2cd969fd511e316c63d6fb6eb0b839a53
@@@ -111,7 -111,7 +111,7 @@@ scsi_set_blocked(struct scsi_cmnd *cmd
        }
  }
  
- static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
  {
        struct request *rq = scsi_cmd_to_rq(cmd);
  
        } else {
                WARN_ON_ONCE(true);
        }
-       blk_mq_requeue_request(rq, true);
+       if (msecs) {
+               blk_mq_requeue_request(rq, false);
+               blk_mq_delay_kick_requeue_list(rq->q, msecs);
+       } else
+               blk_mq_requeue_request(rq, true);
  }
  
  /**
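
The new msecs argument lets callers choose between the old immediate requeue
and a delayed one: with 0, blk_mq_requeue_request(rq, true) requeues and kicks
the queue right away as before; with a non-zero value the request is only
parked on the requeue list and blk_mq_delay_kick_requeue_list() runs that list
once the delay expires. The two call patterns introduced later in this patch:

  /* Forward progress was made: reprep and reissue immediately. */
  scsi_mq_requeue_cmd(cmd, 0);

  /* Target reported an ALUA state transition: back off before reissuing. */
  scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);   /* 1000 ms */
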
@@@ -156,7 -161,7 +161,7 @@@ static void __scsi_queue_insert(struct 
         * Requeue this command.  It will go before all other commands
         * that are already in the queue. Schedule requeue work under
         * lock such that the kblockd_schedule_work() call happens
 -       * before blk_cleanup_queue() finishes.
 +       * before blk_mq_destroy_queue() finishes.
         */
        cmd->result = 0;
  
@@@ -202,8 -207,8 +207,8 @@@ void scsi_queue_insert(struct scsi_cmn
  int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, struct scsi_sense_hdr *sshdr,
 -               int timeout, int retries, u64 flags, req_flags_t rq_flags,
 -               int *resid)
 +               int timeout, int retries, blk_opf_t flags,
 +               req_flags_t rq_flags, int *resid)
  {
        struct request *req;
        struct scsi_cmnd *scmd;
@@@ -417,9 -422,9 +422,9 @@@ static void scsi_starved_list_run(struc
                 * it and the queue.  Mitigate by taking a reference to the
                 * queue and never touching the sdev again after we drop the
                 * host lock.  Note: if __scsi_remove_device() invokes
 -               * blk_cleanup_queue() before the queue is run from this
 +               * blk_mq_destroy_queue() before the queue is run from this
                 * function then blk_run_queue() will return immediately since
 -               * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
 +               * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
                 */
                slq = sdev->request_queue;
                if (!blk_get_queue(slq))
@@@ -626,7 -631,7 +631,7 @@@ static blk_status_t scsi_result_to_blk_
   */
  static unsigned int scsi_rq_err_bytes(const struct request *rq)
  {
 -      unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 +      blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        unsigned int bytes = 0;
        struct bio *bio;
  
        return bytes;
  }
  
- /* Helper for scsi_io_completion() when "reprep" action required. */
- static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
-                                     struct request_queue *q)
- {
-       /* A new command will be prepared and issued. */
-       scsi_mq_requeue_cmd(cmd);
- }
  static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
  {
        struct request *req = scsi_cmd_to_rq(cmd);
        return false;
  }
  
+ /*
+  * When ALUA transition state is returned, reprep the cmd to
+  * use the ALUA handler's transition timeout. Delay the reprep
+  * 1 sec to avoid aggressive retries of the target in that
+  * state.
+  */
+ #define ALUA_TRANSITION_REPREP_DELAY  1000
  /* Helper for scsi_io_completion() when special action required. */
  static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
  {
-       struct request_queue *q = cmd->device->request_queue;
        struct request *req = scsi_cmd_to_rq(cmd);
        int level = 0;
-       enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
-             ACTION_DELAYED_RETRY} action;
+       enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+             ACTION_RETRY, ACTION_DELAYED_RETRY} action;
        struct scsi_sense_hdr sshdr;
        bool sense_valid;
        bool sense_current = true;      /* false implies "deferred sense" */
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                case 0x0a: /* ALUA state transition */
-                                       blk_stat = BLK_STS_TRANSPORT;
-                                       fallthrough;
+                                       action = ACTION_DELAYED_REPREP;
+                                       break;
                                default:
                                        action = ACTION_FAIL;
                                        break;
                        return;
                fallthrough;
        case ACTION_REPREP:
-               scsi_io_completion_reprep(cmd, q);
+               scsi_mq_requeue_cmd(cmd, 0);
+               break;
+       case ACTION_DELAYED_REPREP:
+               scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
                break;
        case ACTION_RETRY:
                /* Retry the same command immediately */
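
Condensed, the condition that now selects the delayed reprep path is the
2/04/0a "Logical unit not accessible, asymmetric access state transition"
sense combination; such commands are requeued with the one second delay above
rather than retried back to back while the target port group is still
transitioning. A small sketch with a hypothetical helper name, not part of the
patch:

  #include <scsi/scsi_cmnd.h>
  #include <scsi/scsi_eh.h>
  #include <scsi/scsi_proto.h>

  /* True if the command failed with ALUA transition sense data (2/04/0a). */
  static bool example_is_alua_transition(struct scsi_cmnd *cmd)
  {
          struct scsi_sense_hdr sshdr;

          return scsi_command_normalize_sense(cmd, &sshdr) &&
                 sshdr.sense_key == NOT_READY &&
                 sshdr.asc == 0x04 && sshdr.ascq == 0x0a;
  }
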
@@@ -926,7 -933,7 +933,7 @@@ static int scsi_io_completion_nz_result
   * command block will be released and the queue function will be goosed. If we
   * are not done then we have to figure out what to do next:
   *
-  *   a) We can call scsi_io_completion_reprep().  The request will be
+  *   a) We can call scsi_mq_requeue_cmd().  The request will be
   *    unprepared and put back on the queue.  Then a new command will
   *    be created for it.  This should be used if we made forward
   *    progress, or if we want to switch from READ(10) to READ(6) for
  void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  {
        int result = cmd->result;
-       struct request_queue *q = cmd->device->request_queue;
        struct request *req = scsi_cmd_to_rq(cmd);
        blk_status_t blk_stat = BLK_STS_OK;
  
         * request just queue the command up again.
         */
        if (likely(result == 0))
-               scsi_io_completion_reprep(cmd, q);
+               scsi_mq_requeue_cmd(cmd, 0);
        else
                scsi_io_completion_action(cmd, result);
  }
@@@ -1118,12 -1124,12 +1124,12 @@@ static void scsi_initialize_rq(struct r
        cmd->retries = 0;
  }
  
 -struct request *scsi_alloc_request(struct request_queue *q,
 -              unsigned int op, blk_mq_req_flags_t flags)
 +struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
 +                                 blk_mq_req_flags_t flags)
  {
        struct request *rq;
  
 -      rq = blk_mq_alloc_request(q, op, flags);
 +      rq = blk_mq_alloc_request(q, opf, flags);
        if (!IS_ERR(rq))
                scsi_initialize_rq(rq);
        return rq;
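
The operation argument is now the dedicated blk_opf_t type instead of a bare
unsigned int, so callers pass a REQ_OP_* value, optionally OR-ed with REQ_*
flags, and sparse can catch mismatched flag types. __scsi_execute() earlier in
this file, for example, allocates its passthrough request roughly like this:

  req = scsi_alloc_request(sdev->request_queue,
                           data_direction == DMA_TO_DEVICE ?
                           REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                           rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
  if (IS_ERR(req))
          return PTR_ERR(req);
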
@@@ -1790,6 -1796,14 +1796,6 @@@ out_put_budget
        return ret;
  }
  
 -static enum blk_eh_timer_return scsi_timeout(struct request *req,
 -              bool reserved)
 -{
 -      if (reserved)
 -              return BLK_EH_RESET_TIMER;
 -      return scsi_times_out(req);
 -}
 -
  static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
                                unsigned int hctx_idx, unsigned int numa_node)
  {
@@@ -1876,6 -1890,10 +1882,6 @@@ void __scsi_init_queue(struct Scsi_Hos
                blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
        }
  
 -      if (dev->dma_mask) {
 -              shost->max_sectors = min_t(unsigned int, shost->max_sectors,
 -                              dma_max_mapping_size(dev) >> SECTOR_SHIFT);
 -      }
        blk_queue_max_hw_sectors(q, shost->max_sectors);
        blk_queue_segment_boundary(q, shost->dma_boundary);
        dma_set_seg_boundary(dev, shost->dma_boundary);