Merge branch 'for-linus' into test
author Jens Axboe <axboe@kernel.dk>
Fri, 2 Feb 2018 03:27:02 +0000 (20:27 -0700)
committer Jens Axboe <axboe@kernel.dk>
Fri, 2 Feb 2018 03:27:02 +0000 (20:27 -0700)
* for-linus:
  blk-mq-sched: Enable merging discard bio into request
  blk-mq: fix discard merge with scheduler attached
  blk-mq: introduce BLK_STS_DEV_RESOURCE

1  2 
drivers/md/dm-rq.c
drivers/scsi/scsi_lib.c

diff --combined drivers/md/dm-rq.c
index aeaaaef43effda534a338f28d8ca660513994e21,348a0cb6963a8012647114e06803283422644ef6..bf0b840645cc8b64c522e99265881ddaf98ed93e
@@@ -315,10 -315,6 +315,10 @@@ static void dm_done(struct request *clo
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio, false);
                break;
 +      case DM_ENDIO_DELAY_REQUEUE:
 +              /* The target wants to requeue the I/O after a delay */
 +              dm_requeue_original_request(tio, true);
 +              break;
        default:
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
@@@ -408,7 -404,7 +408,7 @@@ static blk_status_t dm_dispatch_clone_r
  
        clone->start_time = jiffies;
        r = blk_insert_cloned_request(clone->q, clone);
-       if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
+       if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
        return r;
@@@ -500,7 -496,7 +500,7 @@@ check_again
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                ret = dm_dispatch_clone_request(clone, rq);
-               if (ret == BLK_STS_RESOURCE) {
+               if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
                        blk_rq_unprep_clone(clone);
                        tio->ti->type->release_clone_rq(clone);
                        tio->clone = NULL;
@@@ -717,6 -713,7 +717,6 @@@ int dm_old_init_request_queue(struct ma
        /* disable dm_old_request_fn's merge heuristic by default */
        md->seq_rq_merge_deadline_usecs = 0;
  
 -      dm_init_normal_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);
  
        /* Initialize the request-based DM worker thread */
@@@ -772,7 -769,6 +772,6 @@@ static blk_status_t dm_mq_queue_rq(stru
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
-               blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
                return BLK_STS_RESOURCE;
        }
  
@@@ -824,6 -820,7 +823,6 @@@ int dm_mq_init_request_queue(struct map
                err = PTR_ERR(q);
                goto out_tag_set;
        }
 -      dm_init_md_queue(md);
  
        return 0;
  
diff --combined drivers/scsi/scsi_lib.c
index 976c936029cb7f216fec159ff95e4bc307dc518b,55be2550c555b3b910c63b3b7ca623b8abf9a98d..9bdf9200cacbc58c0e07a724be26906c4cc378d0
@@@ -164,7 -164,7 +164,7 @@@ static void scsi_mq_requeue_cmd(struct 
   * for a requeue after completion, which should only occur in this
   * file.
   */
 -static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 +static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
  {
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
   */
  void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  {
 -      __scsi_queue_insert(cmd, reason, 1);
 +      __scsi_queue_insert(cmd, reason, true);
  }
  
  
@@@ -318,39 -318,22 +318,39 @@@ static void scsi_init_cmd_errh(struct s
                cmd->cmd_len = scsi_command_size(cmd->cmnd);
  }
  
 -void scsi_device_unbusy(struct scsi_device *sdev)
 +/*
 + * Decrement the host_busy counter and wake up the error handler if necessary.
 + * Avoid as follows that the error handler is not woken up if shost->host_busy
 + * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
 + * with an RCU read lock in this function to ensure that this function in its
 + * entirety either finishes before scsi_eh_scmd_add() increases the
 + * host_failed counter or that it notices the shost state change made by
 + * scsi_eh_scmd_add().
 + */
 +static void scsi_dec_host_busy(struct Scsi_Host *shost)
  {
 -      struct Scsi_Host *shost = sdev->host;
 -      struct scsi_target *starget = scsi_target(sdev);
        unsigned long flags;
  
 +      rcu_read_lock();
        atomic_dec(&shost->host_busy);
 -      if (starget->can_queue > 0)
 -              atomic_dec(&starget->target_busy);
 -
 -      if (unlikely(scsi_host_in_recovery(shost) &&
 -                   (shost->host_failed || shost->host_eh_scheduled))) {
 +      if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
 -              scsi_eh_wakeup(shost);
 +              if (shost->host_failed || shost->host_eh_scheduled)
 +                      scsi_eh_wakeup(shost);
                spin_unlock_irqrestore(shost->host_lock, flags);
        }
 +      rcu_read_unlock();
 +}
 +
 +void scsi_device_unbusy(struct scsi_device *sdev)
 +{
 +      struct Scsi_Host *shost = sdev->host;
 +      struct scsi_target *starget = scsi_target(sdev);
 +
 +      scsi_dec_host_busy(shost);
 +
 +      if (starget->can_queue > 0)
 +              atomic_dec(&starget->target_busy);
  
        atomic_dec(&sdev->device_busy);
  }
@@@ -1015,11 -998,11 +1015,11 @@@ void scsi_io_completion(struct scsi_cmn
                break;
        case ACTION_RETRY:
                /* Retry the same command immediately */
 -              __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
 +              __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
                break;
        case ACTION_DELAYED_RETRY:
                /* Retry the same command after a delay */
 -              __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
 +              __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
                break;
        }
  }
@@@ -1145,7 -1128,7 +1145,7 @@@ EXPORT_SYMBOL(scsi_init_io)
   * Called from inside blk_get_request() for pass-through requests and from
   * inside scsi_init_command() for filesystem requests.
   */
 -void scsi_initialize_rq(struct request *rq)
 +static void scsi_initialize_rq(struct request *rq)
  {
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
  
        cmd->jiffies_at_alloc = jiffies;
        cmd->retries = 0;
  }
 -EXPORT_SYMBOL(scsi_initialize_rq);
  
  /* Add a command to the list used by the aacraid and dpt_i2o drivers */
  void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
@@@ -1548,7 -1532,7 +1548,7 @@@ starved
                list_add_tail(&sdev->starved_entry, &shost->starved_list);
        spin_unlock_irq(shost->host_lock);
  out_dec:
 -      atomic_dec(&shost->host_busy);
 +      scsi_dec_host_busy(shost);
        return 0;
  }
  
@@@ -2036,7 -2020,7 +2036,7 @@@ static blk_status_t scsi_queue_rq(struc
        return BLK_STS_OK;
  
  out_dec_host_busy:
 -       atomic_dec(&shost->host_busy);
 +      scsi_dec_host_busy(shost);
  out_dec_target_busy:
        if (scsi_target(sdev)->can_queue > 0)
                atomic_dec(&scsi_target(sdev)->target_busy);
@@@ -2046,9 -2030,9 +2046,9 @@@ out_put_budget
        case BLK_STS_OK:
                break;
        case BLK_STS_RESOURCE:
-               if (atomic_read(&sdev->device_busy) == 0 &&
-                   !scsi_device_blocked(sdev))
-                       blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
+               if (atomic_read(&sdev->device_busy) ||
+                   scsi_device_blocked(sdev))
+                       ret = BLK_STS_DEV_RESOURCE;
                break;
        default:
                /*