/* drivers/block/virtio_blk.c */
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* What the host tells us, plus 2 for header & trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
        struct scsi_request sreq;       /* for SCSI passthrough, must be first */
        u8 sense[SCSI_SENSE_BUFFERSIZE];
        struct virtio_scsi_inhdr in_hdr;
#endif
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct scatterlist sg[];
};
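
/*
 * For illustration: each virtblk_req is handed to the device as a chain of
 * buffers.  In the common (non-SCSI) case the layout is:
 *
 *   driver -> device:  out_hdr (type, ioprio, sector)
 *   either direction:  data scatterlist (absent for e.g. FLUSH)
 *   device -> driver:  status (one byte, VIRTIO_BLK_S_*)
 *
 * status is the last device-writable buffer, so the device fills it only
 * once the request has been processed; virtblk_result() below maps it to
 * a blk_status_t.
 */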

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}

/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;
        sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
        sgs[num_out++] = &cmd;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
        sgs[num_out + num_in++] = &sense;
        sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
        sgs[num_out + num_in++] = &inhdr;
        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_scsi_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        struct virtio_blk *vblk = req->q->queuedata;
        struct scsi_request *sreq = &vbr->sreq;

        sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
        sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
        sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long data)
{
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;

        /*
         * Only allow the generic SCSI ioctls if the host can support it.
         */
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;

        return scsi_cmd_blk_ioctl(bdev, mode, cmd,
                                  (void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
                struct virtblk_req *vbr, struct scatterlist *data_sg,
                bool have_data)
{
        return -EIO;
}
static inline void virtblk_scsi_request_done(struct request *req)
{
}
#define virtblk_ioctl   NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
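
/*
 * Worked example for virtblk_add_req() above: by the time it runs,
 * out_hdr.type already carries VIRTIO_BLK_T_IN or VIRTIO_BLK_T_OUT.  For a
 * read with data it builds sgs[] = { &hdr, data_sg, &status } with
 * num_out = 1 and num_in = 2, since the device writes both the data and
 * the status; for a write, data_sg moves into the out list, giving
 * num_out = 2 and num_in = 1.
 */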

static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        __rq_for_each_bio(bio, req) {
                u64 sector = bio->bi_iter.bi_sector;
                u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                range[n].flags = cpu_to_le32(flags);
                range[n].num_sectors = cpu_to_le32(num_sectors);
                range[n].sector = cpu_to_le64(sector);
                n++;
        }

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}
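
/*
 * Illustrative payload: a single-bio discard of 2048 sectors starting at
 * sector 4096 yields one little-endian range descriptor:
 *
 *   range[0] = { .sector = 4096, .num_sectors = 2048, .flags = 0 }
 *
 * The range array rides in req->special_vec, so it is mapped and sent to
 * the device like ordinary data, and is freed in virtblk_request_done()
 * below.
 */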

static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                kfree(page_address(req->special_vec.bv_page) +
                      req->special_vec.bv_offset);
        }

        switch (req_op(req)) {
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                virtblk_scsi_request_done(req);
                break;
        }

        blk_mq_end_request(req, virtblk_result(vbr));
}

static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
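
/*
 * Note on the loop in virtblk_done() above: virtqueue_enable_cb() returns
 * false if more buffers arrived while callbacks were disabled, in which
 * case we go around again rather than risk missing a completion.  This is
 * the standard virtio idiom for closing the disable/re-enable race.
 */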

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        int err;
        bool notify = false;
        bool unmap = false;
        u32 type;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        switch (req_op(req)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                type = 0;
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_DISCARD:
                type = VIRTIO_BLK_T_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                type = VIRTIO_BLK_T_WRITE_ZEROES;
                unmap = !(req->cmd_flags & REQ_NOUNMAP);
                break;
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                type = VIRTIO_BLK_T_SCSI_CMD;
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
        vbr->out_hdr.sector = type ?
                0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

        blk_mq_start_request(req);

        if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
                err = virtblk_setup_discard_write_zeroes(req, unmap);
                if (err)
                        return BLK_STS_RESOURCE;
        }

        num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
        if (num) {
                if (rq_data_dir(req) == WRITE)
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
                else
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        if (blk_rq_is_scsi(req))
                err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        else
                err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
                        return BLK_STS_DEV_RESOURCE;
                return BLK_STS_IOERR;
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}
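
/*
 * Note: the kick at the end of virtio_queue_rq() is split in two on
 * purpose.  virtqueue_kick_prepare() runs under the vq lock so the avail
 * index update is ordered with our descriptor writes, while the
 * potentially slow, trapping notification via virtqueue_notify() happens
 * after the lock is dropped.
 */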

/* Return the ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_put_request(req);
        return err;
}
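
/*
 * Example trigger (illustrative device name): reading
 * /sys/block/vda/serial invokes serial_show() below, which calls
 * virtblk_get_id() with a page-sized buffer; the REQ_OP_DRV_IN request is
 * sent to the device as VIRTIO_BLK_T_GET_ID and answered with up to
 * VIRTIO_BLK_ID_BYTES of serial string.
 */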

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .ioctl  = virtblk_ioctl,
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}
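
/*
 * Worked example: with PART_BITS = 4 each disk owns 16 minors, so
 * index_to_minor(0) == 0 (vda) and index_to_minor(1) == 16 (vdb); a
 * partition such as vdb2 lands on minor 18.  minor_to_index() is the
 * inverse, discarding the partition bits.
 */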

static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity(vblk->disk, capacity);
}
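
/*
 * Illustrative output of the dev_notice() above, for a 10 GiB disk with
 * 512-byte logical blocks (20971520 sectors; exact prefix depends on the
 * virtio bus device name):
 *
 *   virtio_blk virtio1: [vda] 20971520 512-byte logical blocks (10.7 GB/10.0 GiB)
 */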

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        char *envp[] = { "RESIZE=1", NULL };

        virtblk_update_capacity(vblk, true);
        revalidate_disk(vblk->disk);
        kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration.  */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}
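
/*
 * Example (illustrative): a device reporting num_queues = 4 on a guest
 * with 8 CPUs gets 4 virtqueues named "req.0".."req.3", each completing
 * via virtblk_done(); without VIRTIO_BLK_F_MQ we fall back to a single
 * queue.  The irq_affinity descriptor lets the core spread vq interrupts
 * across CPUs, which blk-mq mirrors via virtblk_map_queues() below.
 */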

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}
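
/*
 * Worked example of the scheme: index 0 -> "vda", 25 -> "vdz",
 * 26 -> "vdaa", 701 -> "vdzz", 702 -> "vdaaa".  Note this is bijective
 * base-26 (no "leading zero" digit), not plain positional base-26.
 */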

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);
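
/*
 * Example usage from userspace (illustrative device name):
 *
 *   # cat /sys/block/vda/cache_type
 *   write back
 *   # echo "write through" > /sys/block/vda/cache_type
 *
 * The store path writes the new mode into the config space wce field and
 * re-reads it via virtblk_update_cache_mode(); the attribute is writable
 * only when the device offers VIRTIO_BLK_F_CONFIG_WCE (see
 * virtblk_attrs_are_visible() below).
 */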

static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};

static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        struct virtio_blk *vblk = set->driver_data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
        vbr->sreq.sense = vbr->sense;
#endif
        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
                                        vblk->vdev, 0);
}

#ifdef CONFIG_VIRTIO_BLK_SCSI
static void virtblk_initialize_rq(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        scsi_req_init(&vbr->sreq);
}
#endif

static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .commit_rqs     = virtio_commit_rqs,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
        .initialize_rq_fn = virtblk_initialize_rq,
#endif
        .map_queues     = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
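
/*
 * Example (illustrative): "modprobe virtio_blk queue_depth=128" caps the
 * per-queue depth; with the default of 0 the ring size chosen by the
 * device is used instead, halved when indirect descriptors are absent
 * since each request then needs two descriptors (see virtblk_probe()
 * below).  The 0444 permission makes the parameter read-only in sysfs.
 */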

static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
        vblk->disk->queue = q;

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems-2);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        max_size = virtio_max_dma_size(vdev);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
                blk_queue_max_discard_segments(q,
                                               min_not_zero(v,
                                                            MAX_DISCARD_SEGMENTS));

                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        return 0;

out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
        VIRTIO_BLK_F_SCSI,
#endif
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");