#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;
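/*
 * Per-virtqueue state: the queue itself, a lock serializing submission
 * and completion on it, and a name used for the interrupt (visible in
 * /proc/interrupts).  Padded to a cacheline so queues serviced from
 * different CPUs do not false-share.
 */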
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
struct virtio_blk {
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* What the host tells us, plus 2 for the header and status trailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* Number of virtqueues and their state. */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};
struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
	struct scsi_request sreq;	/* for SCSI passthrough, must be first */
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	struct virtio_scsi_inhdr in_hdr;
#endif
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct scatterlist sg[];
};
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}
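/*
 * The device reports exactly one status byte per request:
 * VIRTIO_BLK_S_OK = 0, VIRTIO_BLK_S_IOERR = 1, VIRTIO_BLK_S_UNSUPP = 2.
 * virtblk_result() folds anything unknown into BLK_STS_IOERR.
 */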
/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the SCSI command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
	sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
	sgs[num_out++] = &cmd;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
	sgs[num_out + num_in++] = &sense;
	sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
	sgs[num_out + num_in++] = &inhdr;
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
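/*
 * For SCSI passthrough the ring layout grows to at most six segments,
 * device-readable first, then device-writable:
 *
 *   out_hdr | cdb | data-out || data-in | sense | in_hdr | status
 *
 * The data segment appears on only one side of the split, which is why
 * sgs[] needs at most six slots.
 */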
static inline void virtblk_scsi_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	struct virtio_blk *vblk = req->q->queuedata;
	struct scsi_request *sreq = &vbr->sreq;

	sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
	sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
	sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
		struct virtblk_req *vbr, struct scatterlist *data_sg,
		bool have_data)
{
	return -EIO;
}

static inline void virtblk_scsi_request_done(struct request *req)
{
}

#define virtblk_ioctl	NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
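/*
 * A non-SCSI request thus occupies at most three logical segments on
 * the ring, in this order:
 *
 *   out_hdr (device-readable) | data (optional) | status (device-writable)
 *
 * For reads the data segment is device-writable and counted in num_in;
 * for writes it is device-readable and counted in num_out.
 */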
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		virtblk_scsi_request_done(req);
		break;
	}

	blk_mq_end_request(req, virtblk_result(vbr));
}
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
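/*
 * The disable_cb/enable_cb loop closes the classic virtio completion
 * race: virtqueue_enable_cb() returns false if buffers were added
 * while callbacks were disabled, so the handler drains the queue again
 * instead of missing a completion that raced with interrupt re-enable.
 */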
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}
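/*
 * Splitting the kick in two keeps the expensive part outside the lock:
 * virtqueue_kick_prepare() only updates ring indices and checks whether
 * the device wants an event, while virtqueue_notify() performs the
 * (potentially vmexit-inducing) doorbell write to the host.
 */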
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (blk_rq_is_scsi(req))
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_STS_DEV_RESOURCE;
		return BLK_STS_IOERR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}
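/*
 * On a full ring (-ENOSPC) the hardware queue is stopped and
 * BLK_STS_DEV_RESOURCE tells blk-mq to requeue the request;
 * virtblk_done() restarts the queue once completions free up space.
 */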
/* Return the id (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}
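/*
 * The serial number travels as a driver-private request (REQ_OP_DRV_IN),
 * which virtio_queue_rq() turns into a VIRTIO_BLK_T_GET_ID command; the
 * device fills the mapped VIRTIO_BLK_ID_BYTES (20-byte) buffer.
 */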
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}
static const struct block_device_operations virtblk_fops = {
	.ioctl  = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}
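/*
 * With PART_BITS = 4, each device owns a block of 16 minors: index 0
 * maps to minors 0..15 (vda plus up to 15 partitions), index 1 to
 * minors 16..31 (vdb), and so on; GENHD_FL_EXT_DEVT handles partitions
 * beyond that range.
 */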
static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity(vblk->disk, capacity);
}
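/*
 * The config space always gives capacity in 512-byte sectors, whatever
 * the logical block size; the ">> 9" converts the block size to sectors
 * so that nblocks ends up in logical blocks for the log message.
 */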
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	char *envp[] = { "RESIZE=1", NULL };

	virtblk_update_capacity(vblk, true);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}
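/*
 * A device without VIRTIO_BLK_F_MQ fails the config read, and the
 * driver falls back to a single request virtqueue; with the feature,
 * virtblk_probe() later creates one blk-mq hardware queue per vq.
 */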
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
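/*
 * Examples: index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 701 -> "vdzz",
 * 702 -> "vdaaa" (bijective base-26, like spreadsheet column names).
 */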
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}
static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk(vblk->disk);
}
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);
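/*
 * Example: "echo 'write through' > /sys/block/vda/cache_type" stores 0
 * in the wce config field and propagates the new mode to the block
 * layer through blk_queue_write_cache().
 */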
static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct virtio_blk *vblk = set->driver_data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
	vbr->sreq.sense = vbr->sense;
#endif
	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
}
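/*
 * blk_mq_virtio_map_queues() spreads CPUs over hardware queues using
 * the MSI-X vector affinity the transport negotiated, falling back to
 * the generic blk-mq mapping when no affinity information is available.
 */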
#ifdef CONFIG_VIRTIO_BLK_SCSI
static void virtblk_initialize_rq(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	scsi_req_init(&vbr->sreq);
}
#endif
static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.commit_rqs	= virtio_commit_rqs,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	.initialize_rq_fn = virtblk_initialize_rq,
#endif
	.map_queues	= virtblk_map_queues,
};
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}
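	/*
	 * Rationale: without indirect descriptors a request consumes one
	 * ring descriptor per segment plus header and status, so the
	 * driver conservatively budgets two descriptors per request when
	 * sizing the default queue depth.
	 */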
	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
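	/*
	 * cmd_size makes blk-mq embed a struct virtblk_req, followed by
	 * its flexible scatterlist array, in every preallocated request,
	 * so the submission path never allocates per-I/O memory.
	 */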
	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);
	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);
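	/*
	 * Example: a host exposing 512-byte logical blocks on 4K physical
	 * media sets physical_block_exp = 3, giving a physical block size
	 * of 512 * (1 << 3) = 4096 bytes; min_io_size and opt_io_size are
	 * likewise expressed as multiples of the logical block size.
	 */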
	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	return 0;

out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int index = vblk->index;
	int refc;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk);

	/* Only free device id if we don't have any users */
	if (refc == 1)
		ida_simple_remove(&vd_index_ida, index);
}
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}
static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	VIRTIO_BLK_F_SCSI,
#endif
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};
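/*
 * VIRTIO_BLK_F_SCSI is offered only in the legacy table: virtio 1.0
 * removed SCSI passthrough, so modern devices must not negotiate it.
 */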
static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}

module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");