// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

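/*
 * Number of scatterlist entries embedded directly in each request pdu.
 * With sg chaining available, larger requests chain from these inline
 * entries to a separately allocated table; without chaining the whole
 * table must be allocated up front, so nothing is embedded.
 */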
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT 0
#else
#define VIRTIO_BLK_INLINE_SG_CNT 2
#endif

static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
		 "Limit the number of request queues to use for blk device. "
		 "0 for no limit. "
		 "Values > nr_cpu_ids truncated to nr_cpu_ids.");

static unsigned int poll_queues;
module_param(poll_queues, uint, 0644);
MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

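/*
 * Per-virtqueue state. Each queue gets its own lock and its own cacheline
 * so that submission and completion on different queues do not bounce
 * cachelines between CPUs.
 */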
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* Number of virtqueues */
	int num_vqs;
	int io_queues[HCTX_MAX_TYPES];
	struct virtio_blk_vq *vqs;
};

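/*
 * Per-request driver data, allocated by blk-mq as the request pdu. The
 * flexible sg[] array provides the VIRTIO_BLK_INLINE_SG_CNT inline
 * entries accounted for in tag_set.cmd_size in virtblk_probe().
 */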
struct virtblk_req {
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct sg_table sg_table;
	struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}

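/*
 * Queue a request on a virtqueue as up to three sg lists: the out header
 * (driver->device), the data payload (direction depends on the request
 * type), and the single status byte (device->driver).
 */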
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (vbr->sg_table.nents) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = vbr->sg_table.sgl;
		else
			sgs[num_out + num_in++] = vbr->sg_table.sgl;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

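/*
 * Build the payload for a discard/write-zeroes command: an array of
 * virtio_blk_discard_write_zeroes ranges, attached to the request as a
 * special payload so it is transferred to the device like ordinary data.
 */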
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * A single max discard segment means multi-range discard isn't
	 * supported, and the block layer only runs contiguity merges as
	 * for a normal RW request, so we can't rely on the bios for
	 * retrieving each range's info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}

static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
	if (blk_rq_nr_phys_segments(req))
		sg_free_table_chained(&vbr->sg_table,
				      VIRTIO_BLK_INLINE_SG_CNT);
}

static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
			    struct virtblk_req *vbr)
{
	int err;

	if (!blk_rq_nr_phys_segments(req))
		return 0;

	vbr->sg_table.sgl = vbr->sg;
	err = sg_alloc_table_chained(&vbr->sg_table,
				     blk_rq_nr_phys_segments(req),
				     vbr->sg_table.sgl,
				     VIRTIO_BLK_INLINE_SG_CNT);
	if (unlikely(err))
		return -ENOMEM;

	return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}

static void virtblk_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		kfree(bvec_virt(&req->special_vec));
}

static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
				      struct request *req,
				      struct virtblk_req *vbr)
{
	bool unmap = false;
	u32 type;

	vbr->out_hdr.sector = 0;

	switch (req_op(req)) {
	case REQ_OP_READ:
		type = VIRTIO_BLK_T_IN;
		vbr->out_hdr.sector = cpu_to_virtio64(vdev,
						      blk_rq_pos(req));
		break;
	case REQ_OP_WRITE:
		type = VIRTIO_BLK_T_OUT;
		vbr->out_hdr.sector = cpu_to_virtio64(vdev,
						      blk_rq_pos(req));
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
	vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
		if (virtblk_setup_discard_write_zeroes(req, unmap))
			return BLK_STS_RESOURCE;
	}

	return 0;
}

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	virtblk_unmap_data(req, vbr);
	virtblk_cleanup_cmd(req);
	blk_mq_end_request(req, virtblk_result(vbr));
}

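/*
 * Interrupt callback for a virtqueue: drain completed requests, retrying
 * while virtqueue_enable_cb() reports more pending buffers, which closes
 * the race with the device adding a buffer between the last
 * virtqueue_get_buf() and callbacks being re-enabled.
 */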
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}

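/*
 * Preparation common to the queue_rq and queue_rqs paths: build the
 * command header, mark the request started, and DMA-map its data.
 */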
static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
				    struct virtio_blk *vblk,
				    struct request *req,
				    struct virtblk_req *vbr)
{
	blk_status_t status;
	int num;

	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
	if (unlikely(status))
		return status;

	blk_mq_start_request(req);

	/*
	 * virtblk_map_data() returns a negative errno on failure; keep the
	 * result in a signed local before storing it in the unsigned
	 * sg_table.nents, where the error check could never fire.
	 */
	num = virtblk_map_data(hctx, req, vbr);
	if (unlikely(num < 0)) {
		virtblk_cleanup_cmd(req);
		return BLK_STS_RESOURCE;
	}
	vbr->sg_table.nents = num;

	return BLK_STS_OK;
}

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int qid = hctx->queue_num;
	bool notify = false;
	blk_status_t status;
	int err;

	status = virtblk_prep_rq(hctx, vblk, req, vbr);
	if (unlikely(status))
		return status;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		virtblk_unmap_data(req, vbr);
		virtblk_cleanup_cmd(req);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

static bool virtblk_prep_rq_batch(struct request *req)
{
	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	req->mq_hctx->tags->rqs[req->tag] = req;

	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}

static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
				  struct request **rqlist,
				  struct request **requeue_list)
{
	unsigned long flags;
	int err;
	bool kick;

	spin_lock_irqsave(&vq->lock, flags);

	while (!rq_list_empty(*rqlist)) {
		struct request *req = rq_list_pop(rqlist);
		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

		err = virtblk_add_req(vq->vq, vbr);
		if (err) {
			virtblk_unmap_data(req, vbr);
			virtblk_cleanup_cmd(req);
			rq_list_add(requeue_list, req);
		}
	}

	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->lock, flags);

	return kick;
}

static void virtio_queue_rqs(struct request **rqlist)
{
	struct request *req, *next, *prev = NULL;
	struct request *requeue_list = NULL;

	rq_list_for_each_safe(rqlist, req, next) {
		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
		bool kick;

		if (!virtblk_prep_rq_batch(req)) {
			rq_list_move(rqlist, &requeue_list, req, prev);
			req = prev;
			if (!req)
				continue;
		}

		if (!next || req->mq_hctx != next->mq_hctx) {
			req->rq_next = NULL;
			kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
			if (kick)
				virtqueue_notify(vq->vq);

			*rqlist = next;
			prev = NULL;
		} else
			prev = req;
	}

	*rqlist = requeue_list;
}

/* Return the disk's serial number (ID) string in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_mq_free_request(req);
	return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_free_disk(struct gendisk *disk)
{
	struct virtio_blk *vblk = disk->private_data;

	ida_simple_remove(&vd_index_ida, vblk->index);
	mutex_destroy(&vblk->vdev_mutex);
	kfree(vblk);
}

static const struct block_device_operations virtblk_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= virtblk_getgeo,
	.free_disk	= virtblk_free_disk,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);

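/*
 * The config-space capacity is always expressed in 512-byte sectors;
 * convert it to logical blocks for the human-readable sizes printed below.
 */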
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity_and_notify(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

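/*
 * Negotiate the number of request virtqueues with the device
 * (VIRTIO_BLK_F_MQ), clamp it by the num_request_queues module parameter
 * and nr_cpu_ids, and reserve up to poll_queues of them as callback-less
 * queues dedicated to polled I/O.
 */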
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	unsigned int num_poll_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	if (!err && !num_vqs) {
		dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
		return -EINVAL;
	}

	num_vqs = min_t(unsigned int,
			min_not_zero(num_request_queues, nr_cpu_ids),
			num_vqs);

	num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);

	vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
	vblk->io_queues[HCTX_TYPE_READ] = 0;
	vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

	dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
		 vblk->io_queues[HCTX_TYPE_DEFAULT],
		 vblk->io_queues[HCTX_TYPE_READ],
		 vblk->io_queues[HCTX_TYPE_POLL]);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs - num_poll_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	for (; i < num_vqs; i++) {
		callbacks[i] = NULL;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

/*
 * Legacy naming scheme used for virtio devices. We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
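/*
 * Example: indexes 0..25 map to vda..vdz, 26 to vdaa, 27 to vdab, ...,
 * 701 to vdzz, and 702 to vdaaa.
 */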
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

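/*
 * "cache_type" disk attribute: writing "write through" or "write back"
 * updates the device's wce config byte (only writable when
 * VIRTIO_BLK_F_CONFIG_WCE was negotiated), e.g.:
 *   echo "write through" > /sys/block/vda/cache_type
 */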
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;
	int i, qoff;

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = vblk->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(&set->map[i]);
		else
			blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
	}

	return 0;
}

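/*
 * Completion path for polled queues: requests are gathered into an
 * io_comp_batch where possible and finished together in
 * virtblk_complete_batch(); anything that cannot be batched falls back
 * to blk_mq_complete_request().
 */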
static void virtblk_complete_batch(struct io_comp_batch *iob)
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
		virtblk_cleanup_cmd(req);
	}
	blk_mq_end_request_batch(iob);
}

static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = hctx->driver_data;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;
	int found = 0;

	spin_lock_irqsave(&vq->lock, flags);

	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
		struct request *req = blk_mq_rq_from_pdu(vbr);

		found++;
		if (!blk_mq_add_to_batch(req, iob, vbr->status,
					 virtblk_complete_batch))
			blk_mq_complete_request(req);
	}

	if (found)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);

	spin_unlock_irqrestore(&vq->lock, flags);

	return found;
}

static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int hctx_idx)
{
	struct virtio_blk *vblk = data;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];

	WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = vq;
	return 0;
}

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.queue_rqs	= virtio_queue_rqs,
	.commit_rqs	= virtio_commit_rqs,
	.init_hctx	= virtblk_init_hctx,
	.complete	= virtblk_request_done,
	.map_queues	= virtblk_map_queues,
	.poll		= virtblk_poll,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	unsigned int queue_depth;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* Prevent integer overflows and honor max vq size */
	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			queue_depth /= 2;
	} else {
		queue_depth = virtblk_queue_depth;
	}

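	/*
	 * cmd_size reserves room in each request pdu for the inline
	 * scatterlist entries declared at the end of struct virtblk_req.
	 */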
	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
	vblk->tag_set.nr_maps = 1;
	if (vblk->io_queues[HCTX_TYPE_POLL])
		vblk->tag_set.nr_maps = 3;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_free_vq;

	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
	if (IS_ERR(vblk->disk)) {
		err = PTR_ERR(vblk->disk);
		goto out_free_tags;
	}
	q = vblk->disk->queue;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->minors = 1 << PART_BITS;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, sg_elems);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	max_size = virtio_max_dma_size(vdev);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err) {
		err = blk_validate_block_size(blk_size);
		if (err) {
			dev_err(&vdev->dev,
				"virtio_blk: invalid block size: 0x%x\n",
				blk_size);
			goto out_cleanup_disk;
		}

		blk_queue_logical_block_size(q, blk_size);
	} else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

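	/*
	 * Discard and write-zeroes limits come from config space: zero
	 * max_discard_sectors/max_write_zeroes_sectors means no device
	 * limit, and a zero discard_sector_alignment falls back to the
	 * logical block size.
	 */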
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &v);
		if (v)
			q->limits.discard_granularity = v << SECTOR_SHIFT;
		else
			q->limits.discard_granularity = blk_size;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);

		/*
		 * max_discard_seg == 0 is out of spec but we always
		 * handled it.
		 */
		if (!v)
			v = sg_elems;
		blk_queue_max_discard_segments(q,
					       min(v, MAX_DISCARD_SEGMENTS));
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	put_disk(vblk->disk);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	put_disk(vblk->disk);
}

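/*
 * Suspend/resume: freeze resets the device and tears down the virtqueues
 * with blk-mq quiesced; restore rebuilds them via init_vq() before
 * letting requests flow again.
 */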
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	virtio_reset_device(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

static int __init virtio_blk_init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit virtio_blk_fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(virtio_blk_init);
module_exit(virtio_blk_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");