/*
 * virtio_blk — block driver for virtio devices.
 */
1 | //#define DEBUG |
2 | #include <linux/spinlock.h> | |
3 | #include <linux/blkdev.h> | |
4 | #include <linux/hdreg.h> | |
5 | #include <linux/virtio.h> | |
6 | #include <linux/virtio_blk.h> | |
3d1266c7 JA |
7 | #include <linux/scatterlist.h> |
8 | ||
9 | #define VIRTIO_MAX_SG (3+MAX_PHYS_SEGMENTS) | |
/* Next device letter: disks are named vda, vdb, ... */
static unsigned char virtblk_index = 'a';

/*
 * Per-device state for one virtio block device.
 */
struct virtio_blk
{
	/* Protects the virtqueue and the request list; also serves as the
	 * block request queue lock (passed to blk_init_queue in probe). */
	spinlock_t lock;

	struct virtio_device *vdev;	/* the underlying virtio device */
	struct virtqueue *vq;		/* single virtqueue for all requests */

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;		/* in-flight virtblk_req entries */

	mempool_t *pool;		/* allocator for struct virtblk_req */

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[VIRTIO_MAX_SG];
};
30 | ||
/*
 * One in-flight request: the block-layer request plus the virtio
 * header/footer that bracket its data in the scatterlist.
 */
struct virtblk_req
{
	struct list_head list;			/* entry on virtio_blk.reqs */
	struct request *req;			/* the block-layer request */
	struct virtio_blk_outhdr out_hdr;	/* sent to host: type/sector/ioprio */
	struct virtio_blk_inhdr in_hdr;		/* written by host: status byte */
};
38 | ||
/*
 * Virtqueue callback: reap every completed request from the host,
 * finish it with the block layer, and free its tracking structure.
 *
 * NOTE(review): presumably callable from interrupt context — irqsave
 * locking is used, which is safe in either case; confirm against the
 * transport's callback contract.
 */
static bool blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
		int uptodate;
		/* Translate the status byte the host wrote into in_hdr. */
		switch (vbr->in_hdr.status) {
		case VIRTIO_BLK_S_OK:
			uptodate = 1;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			uptodate = -ENOTTY;	/* host rejected the command */
			break;
		default:
			uptodate = 0;		/* generic I/O error */
			break;
		}

		end_dequeued_request(vbr->req, uptodate);
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
	return true;
}
70 | ||
/*
 * Build and submit one request to the virtqueue.
 *
 * Returns false when out of resources (mempool exhausted or virtqueue
 * full); the caller stops the queue and retries after a completion
 * frees something up.  Called with vblk->lock held (it is the request
 * queue lock).
 */
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out, in;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
	if (blk_fs_request(vbr->req)) {
		/* Ordinary read/write; the IN/OUT flag is OR'd in below. */
		vbr->out_hdr.type = 0;
		vbr->out_hdr.sector = vbr->req->sector;
		vbr->out_hdr.ioprio = vbr->req->ioprio;
	} else if (blk_pc_request(vbr->req)) {
		/* Packet (SCSI) command: sector is meaningless. */
		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = vbr->req->ioprio;
	} else {
		/* We don't put anything else in the queue. */
		BUG();
	}

	if (blk_barrier_rq(vbr->req))
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

	/* This init could be done at vblk creation time */
	sg_init_table(vblk->sg, VIRTIO_MAX_SG);
	/* Buffer layout: [out_hdr][data segments...][in_hdr].  The host
	 * writes the status into in_hdr, so it must be in the "in" part. */
	sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
	num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
	sg_set_buf(&vblk->sg[num+1], &vbr->in_hdr, sizeof(vbr->in_hdr));

	if (rq_data_dir(vbr->req) == WRITE) {
		vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
		out = 1 + num;		/* header + data go to the host */
		in = 1;			/* only the status comes back */
	} else {
		vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
		out = 1;		/* only the header goes out */
		in = 1 + num;		/* data + status come back */
	}

	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
		/* Virtqueue full: undo the allocation; caller retries. */
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}
123 | ||
124 | static void do_virtblk_request(struct request_queue *q) | |
125 | { | |
126 | struct virtio_blk *vblk = NULL; | |
127 | struct request *req; | |
128 | unsigned int issued = 0; | |
129 | ||
130 | while ((req = elv_next_request(q)) != NULL) { | |
131 | vblk = req->rq_disk->private_data; | |
132 | BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg)); | |
133 | ||
134 | /* If this request fails, stop queue and wait for something to | |
135 | finish to restart it. */ | |
136 | if (!do_req(q, vblk, req)) { | |
137 | blk_stop_queue(q); | |
138 | break; | |
139 | } | |
140 | blkdev_dequeue_request(req); | |
141 | issued++; | |
142 | } | |
143 | ||
144 | if (issued) | |
145 | vblk->vq->vq_ops->kick(vblk->vq); | |
146 | } | |
147 | ||
148 | static int virtblk_ioctl(struct inode *inode, struct file *filp, | |
149 | unsigned cmd, unsigned long data) | |
150 | { | |
151 | return scsi_cmd_ioctl(filp, inode->i_bdev->bd_disk->queue, | |
152 | inode->i_bdev->bd_disk, cmd, | |
153 | (void __user *)data); | |
154 | } | |
155 | ||
156 | static struct block_device_operations virtblk_fops = { | |
157 | .ioctl = virtblk_ioctl, | |
158 | .owner = THIS_MODULE, | |
159 | }; | |
160 | ||
/*
 * Probe one virtio block device: allocate per-device state, set up the
 * virtqueue, request mempool and gendisk, read the device config
 * (capacity and optional limits), and publish the disk.  Errors unwind
 * via gotos in reverse order of acquisition.
 */
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	int err, major;
	u64 cap;
	u32 v;

	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;

	/* We expect one virtqueue, for output. */
	vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_free_vblk;
	}

	/* Reserve of one element so a request can always be built even
	 * under memory pressure; do_req falls back to retrying anyway. */
	vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* 0 = dynamically allocated major number. */
	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		err = major;
		goto out_mempool;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << 4);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_unregister_blkdev;
	}

	/* vblk->lock doubles as the request queue lock. */
	vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!vblk->disk->queue) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	/* NOTE(review): after 26 devices this walks past 'z' into
	 * non-letter characters — confirm intended device count limit. */
	sprintf(vblk->disk->disk_name, "vd%c", virtblk_index++);
	vblk->disk->major = major;
	vblk->disk->first_minor = 0;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;

	/* If barriers are supported, tell block layer that queue is ordered */
	if (vdev->config->feature(vdev, VIRTIO_BLK_F_BARRIER))
		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);

	/* Host must always specify the capacity. */
	__virtio_config_val(vdev, offsetof(struct virtio_blk_config, capacity),
			    &cap);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
	if (!err)
		blk_queue_max_segment_size(vblk->disk->queue, v);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&v);
	if (!err)
		blk_queue_max_hw_segments(vblk->disk->queue, v);

	/* These config reads failing is non-fatal; err is not returned. */
	add_disk(vblk->disk);
	return 0;

out_put_disk:
	put_disk(vblk->disk);
out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
	vdev->config->del_vq(vblk->vq);
out_free_vblk:
	kfree(vblk);
out:
	return err;
}
262 | ||
263 | static void virtblk_remove(struct virtio_device *vdev) | |
264 | { | |
265 | struct virtio_blk *vblk = vdev->priv; | |
266 | int major = vblk->disk->major; | |
267 | ||
268 | BUG_ON(!list_empty(&vblk->reqs)); | |
269 | blk_cleanup_queue(vblk->disk->queue); | |
270 | put_disk(vblk->disk); | |
271 | unregister_blkdev(major, "virtblk"); | |
272 | mempool_destroy(vblk->pool); | |
74b2553f RR |
273 | /* There should be nothing in the queue now, so no need to shutdown */ |
274 | vdev->config->del_vq(vblk->vq); | |
e467cde2 RR |
275 | kfree(vblk); |
276 | } | |
277 | ||
/* Devices we bind to: any virtio device with the block-device ID. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },	/* terminator */
};
282 | ||
/* Driver registration glue for the virtio bus. */
static struct virtio_driver virtio_blk = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtblk_probe,
	.remove = __devexit_p(virtblk_remove),
};
290 | ||
/* Module entry point: register this driver with the virtio bus. */
static int __init init(void)
{
	return register_virtio_driver(&virtio_blk);
}
295 | ||
/* Module exit point: unregister this driver from the virtio bus. */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);

/* Export the ID table so the module loader can autoload us. */
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");