scsi: core: Complain if scsi_target_block() fails
[linux-2.6-block.git] / drivers / scsi / scsi_lib.c
index e1da8c70a266df24d1ae89181328007f33f4d711..d47d637e6be256c8fa0d0309398c5c212d7ee258 100644 (file)
@@ -84,11 +84,11 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
        struct kmem_cache *cache;
        int ret = 0;
 
+       mutex_lock(&scsi_sense_cache_mutex);
        cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
        if (cache)
-               return 0;
+               goto exit;
 
-       mutex_lock(&scsi_sense_cache_mutex);
        if (shost->unchecked_isa_dma) {
                scsi_sense_isadma_cache =
                        kmem_cache_create("scsi_sense_cache(DMA)",
@@ -104,7 +104,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
                if (!scsi_sense_cache)
                        ret = -ENOMEM;
        }
-
+ exit:
        mutex_unlock(&scsi_sense_cache_mutex);
        return ret;
 }
@@ -1452,7 +1452,7 @@ static void scsi_softirq_done(struct request *rq)
        disposition = scsi_decide_disposition(cmd);
        if (disposition != SUCCESS &&
            time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
-               sdev_printk(KERN_ERR, cmd->device,
+               scmd_printk(KERN_ERR, cmd,
                            "timing out command, waited %lus\n",
                            wait_for/HZ);
                disposition = SUCCESS;
@@ -1666,10 +1666,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
                blk_mq_start_request(req);
        }
 
+       cmd->flags &= SCMD_PRESERVED_FLAGS;
        if (sdev->simple_tags)
                cmd->flags |= SCMD_TAGGED;
-       else
-               cmd->flags &= ~SCMD_TAGGED;
+       if (bd->last)
+               cmd->flags |= SCMD_LAST;
 
        scsi_init_cmd_errh(cmd);
        cmd->scsi_done = scsi_mq_done;
@@ -1784,6 +1785,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
                blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
        }
 
+       shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+                       dma_max_mapping_size(dev) >> SECTOR_SHIFT);
        blk_queue_max_hw_sectors(q, shost->max_sectors);
        if (shost->unchecked_isa_dma)
                blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
@@ -1791,7 +1794,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
        dma_set_seg_boundary(dev, shost->dma_boundary);
 
        blk_queue_max_segment_size(q, shost->max_segment_size);
-       dma_set_max_seg_size(dev, shost->max_segment_size);
+       blk_queue_virt_boundary(q, shost->virt_boundary_mask);
+       dma_set_max_seg_size(dev, queue_max_segment_size(q));
 
        /*
         * Set a reasonable default alignment:  The larger of 32-byte (dword),
@@ -1804,10 +1808,37 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
 
+static const struct blk_mq_ops scsi_mq_ops_no_commit = {
+       .get_budget     = scsi_mq_get_budget,
+       .put_budget     = scsi_mq_put_budget,
+       .queue_rq       = scsi_queue_rq,
+       .complete       = scsi_softirq_done,
+       .timeout        = scsi_timeout,
+#ifdef CONFIG_BLK_DEBUG_FS
+       .show_rq        = scsi_show_rq,
+#endif
+       .init_request   = scsi_mq_init_request,
+       .exit_request   = scsi_mq_exit_request,
+       .initialize_rq_fn = scsi_initialize_rq,
+       .busy           = scsi_mq_lld_busy,
+       .map_queues     = scsi_map_queues,
+};
+
+
+static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+       struct request_queue *q = hctx->queue;
+       struct scsi_device *sdev = q->queuedata;
+       struct Scsi_Host *shost = sdev->host;
+
+       shost->hostt->commit_rqs(shost, hctx->queue_num);
+}
+
 static const struct blk_mq_ops scsi_mq_ops = {
        .get_budget     = scsi_mq_get_budget,
        .put_budget     = scsi_mq_put_budget,
        .queue_rq       = scsi_queue_rq,
+       .commit_rqs     = scsi_commit_rqs,
        .complete       = scsi_softirq_done,
        .timeout        = scsi_timeout,
 #ifdef CONFIG_BLK_DEBUG_FS
@@ -1843,7 +1874,10 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
                        sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
 
        memset(&shost->tag_set, 0, sizeof(shost->tag_set));
-       shost->tag_set.ops = &scsi_mq_ops;
+       if (shost->hostt->commit_rqs)
+               shost->tag_set.ops = &scsi_mq_ops;
+       else
+               shost->tag_set.ops = &scsi_mq_ops_no_commit;
        shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
        shost->tag_set.queue_depth = shost->can_queue;
        shost->tag_set.cmd_size = cmd_size;
@@ -2673,6 +2707,14 @@ void scsi_start_queue(struct scsi_device *sdev)
 int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
                                        enum scsi_device_state new_state)
 {
+       switch (new_state) {
+       case SDEV_RUNNING:
+       case SDEV_TRANSPORT_OFFLINE:
+               break;
+       default:
+               return -EINVAL;
+       }
+
        /*
         * Try to transition the scsi device to SDEV_RUNNING or one of the
         * offlined states and goose the device queue if successful.
@@ -2730,7 +2772,12 @@ static int scsi_internal_device_unblock(struct scsi_device *sdev,
 static void
 device_block(struct scsi_device *sdev, void *data)
 {
-       scsi_internal_device_block(sdev);
+       int ret;
+
+       ret = scsi_internal_device_block(sdev);
+
+       WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
+                 dev_name(&sdev->sdev_gendev), ret);
 }
 
 static int