Commit | Line | Data |
---|---|---|
9f27ee59 JF |
1 | /* |
2 | * blkfront.c | |
3 | * | |
4 | * XenLinux virtual block device driver. | |
5 | * | |
6 | * Copyright (c) 2003-2004, Keir Fraser & Steve Hand | |
7 | * Modifications by Mark A. Williamson are (c) Intel Research Cambridge | |
8 | * Copyright (c) 2004, Christian Limpach | |
9 | * Copyright (c) 2004, Andrew Warfield | |
10 | * Copyright (c) 2005, Christopher Clark | |
11 | * Copyright (c) 2005, XenSource Ltd | |
12 | * | |
13 | * This program is free software; you can redistribute it and/or | |
14 | * modify it under the terms of the GNU General Public License version 2 | |
15 | * as published by the Free Software Foundation; or, when distributed | |
16 | * separately from the Linux kernel or incorporated into other | |
17 | * software packages, subject to the following license: | |
18 | * | |
19 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
20 | * of this source file (the "Software"), to deal in the Software without | |
21 | * restriction, including without limitation the rights to use, copy, modify, | |
22 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | |
23 | * and to permit persons to whom the Software is furnished to do so, subject to | |
24 | * the following conditions: | |
25 | * | |
26 | * The above copyright notice and this permission notice shall be included in | |
27 | * all copies or substantial portions of the Software. | |
28 | * | |
29 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
30 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
31 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
32 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
33 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
34 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
35 | * IN THE SOFTWARE. | |
36 | */ | |
37 | ||
38 | #include <linux/interrupt.h> | |
39 | #include <linux/blkdev.h> | |
597592d9 | 40 | #include <linux/hdreg.h> |
440a01a7 | 41 | #include <linux/cdrom.h> |
9f27ee59 | 42 | #include <linux/module.h> |
5a0e3ad6 | 43 | #include <linux/slab.h> |
2a48fc0a | 44 | #include <linux/mutex.h> |
9e973e64 | 45 | #include <linux/scatterlist.h> |
9f27ee59 | 46 | |
1ccbf534 | 47 | #include <xen/xen.h> |
9f27ee59 JF |
48 | #include <xen/xenbus.h> |
49 | #include <xen/grant_table.h> | |
50 | #include <xen/events.h> | |
51 | #include <xen/page.h> | |
c1c5413a | 52 | #include <xen/platform_pci.h> |
9f27ee59 JF |
53 | |
54 | #include <xen/interface/grant_table.h> | |
55 | #include <xen/interface/io/blkif.h> | |
3e334239 | 56 | #include <xen/interface/io/protocols.h> |
9f27ee59 JF |
57 | |
58 | #include <asm/xen/hypervisor.h> | |
59 | ||
60 | enum blkif_state { | |
61 | BLKIF_STATE_DISCONNECTED, | |
62 | BLKIF_STATE_CONNECTED, | |
63 | BLKIF_STATE_SUSPENDED, | |
64 | }; | |
65 | ||
66 | struct blk_shadow { | |
67 | struct blkif_request req; | |
a945b980 | 68 | struct request *request; |
9f27ee59 JF |
69 | unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
70 | }; | |
71 | ||
2a48fc0a | 72 | static DEFINE_MUTEX(blkfront_mutex); |
83d5cde4 | 73 | static const struct block_device_operations xlvbd_block_fops; |
9f27ee59 | 74 | |
667c78af | 75 | #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) |
9f27ee59 JF |
76 | |
77 | /* | |
78 | * We have one of these per vbd, whether ide, scsi or 'other'. They | |
79 | * hang in private_data off the gendisk structure. We may end up | |
80 | * putting all kinds of interesting stuff here :-) | |
81 | */ | |
82 | struct blkfront_info | |
83 | { | |
b70f5fa0 | 84 | struct mutex mutex; |
9f27ee59 | 85 | struct xenbus_device *xbdev; |
9f27ee59 JF |
86 | struct gendisk *gd; |
87 | int vdevice; | |
88 | blkif_vdev_t handle; | |
89 | enum blkif_state connected; | |
90 | int ring_ref; | |
91 | struct blkif_front_ring ring; | |
9e973e64 | 92 | struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
9f27ee59 JF |
93 | unsigned int evtchn, irq; |
94 | struct request_queue *rq; | |
95 | struct work_struct work; | |
96 | struct gnttab_free_callback callback; | |
97 | struct blk_shadow shadow[BLK_RING_SIZE]; | |
98 | unsigned long shadow_free; | |
4913efe4 | 99 | unsigned int feature_flush; |
edf6ef59 | 100 | unsigned int flush_op; |
ed30bf31 LD |
101 | unsigned int feature_discard; |
102 | unsigned int discard_granularity; | |
103 | unsigned int discard_alignment; | |
1d78d705 | 104 | int is_ready; |
9f27ee59 JF |
105 | }; |
106 | ||
107 | static DEFINE_SPINLOCK(blkif_io_lock); | |
108 | ||
0e345826 JB |
109 | static unsigned int nr_minors; |
110 | static unsigned long *minors; | |
111 | static DEFINE_SPINLOCK(minor_lock); | |
112 | ||
9f27ee59 JF |
113 | #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ |
114 | (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) | |
115 | #define GRANT_INVALID_REF 0 | |
116 | ||
117 | #define PARTS_PER_DISK 16 | |
9246b5f0 | 118 | #define PARTS_PER_EXT_DISK 256 |
9f27ee59 JF |
119 | |
120 | #define BLKIF_MAJOR(dev) ((dev)>>8) | |
121 | #define BLKIF_MINOR(dev) ((dev) & 0xff) | |
122 | ||
9246b5f0 CL |
123 | #define EXT_SHIFT 28 |
124 | #define EXTENDED (1<<EXT_SHIFT) | |
125 | #define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED)) | |
126 | #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) | |
c80a4209 SS |
127 | #define EMULATED_HD_DISK_MINOR_OFFSET (0) |
128 | #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256) | |
196cfe2a SB |
129 | #define EMULATED_SD_DISK_MINOR_OFFSET (0) |
130 | #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256) | |
9f27ee59 | 131 | |
9246b5f0 | 132 | #define DEV_NAME "xvd" /* name in /dev */ |
9f27ee59 JF |
133 | |
134 | static int get_id_from_freelist(struct blkfront_info *info) | |
135 | { | |
136 | unsigned long free = info->shadow_free; | |
b9ed7252 | 137 | BUG_ON(free >= BLK_RING_SIZE); |
97e36834 KRW |
138 | info->shadow_free = info->shadow[free].req.u.rw.id; |
139 | info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */ | |
9f27ee59 JF |
140 | return free; |
141 | } | |
142 | ||
143 | static void add_id_to_freelist(struct blkfront_info *info, | |
144 | unsigned long id) | |
145 | { | |
97e36834 | 146 | info->shadow[id].req.u.rw.id = info->shadow_free; |
a945b980 | 147 | info->shadow[id].request = NULL; |
9f27ee59 JF |
148 | info->shadow_free = id; |
149 | } | |
150 | ||
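The two helpers above thread a free list through the shadow array itself: every unused slot's `req.u.rw.id` field holds the index of the next free slot (blkfront_probe seeds this chain with `id = i+1`), so allocating and releasing request ids is O(1) with no extra storage. A minimal userspace sketch of the same trick, with hypothetical names:

```c
#include <assert.h>
#include <stdio.h>

#define RING_SIZE 32

struct slot { unsigned long next_free; /* overlays req.u.rw.id */ };

static struct slot shadow[RING_SIZE];
static unsigned long shadow_free;

static void freelist_init(void)
{
    unsigned long i;
    for (i = 0; i < RING_SIZE; i++)
        shadow[i].next_free = i + 1;    /* last entry points past the end */
    shadow_free = 0;
}

static unsigned long get_id(void)
{
    unsigned long free = shadow_free;
    assert(free < RING_SIZE);           /* mirrors the BUG_ON() above */
    shadow_free = shadow[free].next_free;
    return free;
}

static void put_id(unsigned long id)
{
    shadow[id].next_free = shadow_free;
    shadow_free = id;
}

int main(void)
{
    freelist_init();
    unsigned long a = get_id(), b = get_id();
    put_id(a);
    printf("a=%lu b=%lu reused=%lu\n", a, b, get_id()); /* get_id() reuses a */
    return 0;
}
```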
0e345826 JB |
151 | static int xlbd_reserve_minors(unsigned int minor, unsigned int nr) |
152 | { | |
153 | unsigned int end = minor + nr; | |
154 | int rc; | |
155 | ||
156 | if (end > nr_minors) { | |
157 | unsigned long *bitmap, *old; | |
158 | ||
159 | bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap), | |
160 | GFP_KERNEL); | |
161 | if (bitmap == NULL) | |
162 | return -ENOMEM; | |
163 | ||
164 | spin_lock(&minor_lock); | |
165 | if (end > nr_minors) { | |
166 | old = minors; | |
167 | memcpy(bitmap, minors, | |
168 | BITS_TO_LONGS(nr_minors) * sizeof(*bitmap)); | |
169 | minors = bitmap; | |
170 | nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG; | |
171 | } else | |
172 | old = bitmap; | |
173 | spin_unlock(&minor_lock); | |
174 | kfree(old); | |
175 | } | |
176 | ||
177 | spin_lock(&minor_lock); | |
178 | if (find_next_bit(minors, end, minor) >= end) { | |
179 | for (; minor < end; ++minor) | |
180 | __set_bit(minor, minors); | |
181 | rc = 0; | |
182 | } else | |
183 | rc = -EBUSY; | |
184 | spin_unlock(&minor_lock); | |
185 | ||
186 | return rc; | |
187 | } | |
188 | ||
189 | static void xlbd_release_minors(unsigned int minor, unsigned int nr) | |
190 | { | |
191 | unsigned int end = minor + nr; | |
192 | ||
193 | BUG_ON(end > nr_minors); | |
194 | spin_lock(&minor_lock); | |
195 | for (; minor < end; ++minor) | |
196 | __clear_bit(minor, minors); | |
197 | spin_unlock(&minor_lock); | |
198 | } | |
199 | ||
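`xlbd_reserve_minors()` claims a contiguous run of minors atomically under `minor_lock`: `find_next_bit()` returning `>= end` means no bit in `[minor, end)` is already set, and only then is the run marked. A standalone sketch of the claim/release pairing, using plain flags instead of the kernel bitmap API:

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_MINORS 256
static bool minors[NR_MINORS];   /* one flag per minor; the driver uses a bitmap */

static int reserve_minors(unsigned int minor, unsigned int nr)
{
    unsigned int i;
    for (i = minor; i < minor + nr; i++)
        if (minors[i])
            return -1;           /* -EBUSY in the driver */
    for (i = minor; i < minor + nr; i++)
        minors[i] = true;
    return 0;
}

static void release_minors(unsigned int minor, unsigned int nr)
{
    unsigned int i;
    for (i = minor; i < minor + nr; i++)
        minors[i] = false;
}

int main(void)
{
    printf("%d\n", reserve_minors(0, 16)); /* 0: xvda takes minors 0..15 */
    printf("%d\n", reserve_minors(8, 1));  /* -1: overlaps xvda's range  */
    release_minors(0, 16);
    return 0;
}
```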
9f27ee59 JF |
200 | static void blkif_restart_queue_callback(void *arg) |
201 | { | |
202 | struct blkfront_info *info = (struct blkfront_info *)arg; | |
203 | schedule_work(&info->work); | |
204 | } | |
205 | ||
afe42d7d | 206 | static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) |
597592d9 IC |
207 | { |
208 | /* We don't have real geometry info, but let's at least return | |
209 | values consistent with the size of the device */ | |
210 | sector_t nsect = get_capacity(bd->bd_disk); | |
211 | sector_t cylinders = nsect; | |
212 | ||
213 | hg->heads = 0xff; | |
214 | hg->sectors = 0x3f; | |
215 | sector_div(cylinders, hg->heads * hg->sectors); | |
216 | hg->cylinders = cylinders; | |
217 | if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) | |
218 | hg->cylinders = 0xffff; | |
219 | return 0; | |
220 | } | |
221 | ||
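Since the geometry is synthetic, the cylinder count is just capacity divided by 255*63, truncated into `hd_geometry`'s 16-bit cylinders field and clamped when that truncation would understate the disk. A worked example of the same arithmetic, assuming a 16 GiB disk:

```c
#include <stdio.h>

int main(void)
{
    unsigned long long nsect = 16ULL << 30 >> 9;  /* 16 GiB in 512-byte sectors */
    unsigned int heads = 0xff, sectors = 0x3f;    /* 255 heads, 63 sectors/track */
    unsigned short cylinders = nsect / (heads * sectors); /* 16-bit, as in hd_geometry */

    if ((unsigned long long)(cylinders + 1) * heads * sectors < nsect)
        cylinders = 0xffff;   /* truncation understated capacity: clamp */

    /* prints: 33554432 sectors -> CHS 2088/255/63 */
    printf("%llu sectors -> CHS %u/%u/%u\n", nsect, cylinders, heads, sectors);
    return 0;
}
```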
a63c848b | 222 | static int blkif_ioctl(struct block_device *bdev, fmode_t mode, |
62aa0054 | 223 | unsigned command, unsigned long argument) |
440a01a7 | 224 | { |
a63c848b | 225 | struct blkfront_info *info = bdev->bd_disk->private_data; |
440a01a7 CL |
226 | int i; |
227 | ||
228 | dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n", | |
229 | command, (long)argument); | |
230 | ||
231 | switch (command) { | |
232 | case CDROMMULTISESSION: | |
233 | dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n"); | |
234 | for (i = 0; i < sizeof(struct cdrom_multisession); i++) | |
235 | if (put_user(0, (char __user *)(argument + i))) | |
236 | return -EFAULT; | |
237 | return 0; | |
238 | ||
239 | case CDROM_GET_CAPABILITY: { | |
240 | struct gendisk *gd = info->gd; | |
241 | if (gd->flags & GENHD_FL_CD) | |
242 | return 0; | |
243 | return -EINVAL; | |
244 | } | |
245 | ||
246 | default: | |
247 | /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", | |
248 | command);*/ | |
249 | return -EINVAL; /* same return as native Linux */ | |
250 | } | |
251 | ||
252 | return 0; | |
253 | } | |
254 | ||
9f27ee59 | 255 | /* |
c64e38ea | 256 | * Generate a Xen blkfront IO request from a blk layer request. Reads |
edf6ef59 | 257 | * and writes are handled as expected. |
9f27ee59 | 258 | * |
c64e38ea | 259 | * @req: a request struct |
9f27ee59 JF |
260 | */ |
261 | static int blkif_queue_request(struct request *req) | |
262 | { | |
263 | struct blkfront_info *info = req->rq_disk->private_data; | |
264 | unsigned long buffer_mfn; | |
265 | struct blkif_request *ring_req; | |
9f27ee59 JF |
266 | unsigned long id; |
267 | unsigned int fsect, lsect; | |
9e973e64 | 268 | int i, ref; |
9f27ee59 | 269 | grant_ref_t gref_head; |
9e973e64 | 270 | struct scatterlist *sg; |
9f27ee59 JF |
271 | |
272 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) | |
273 | return 1; | |
274 | ||
275 | if (gnttab_alloc_grant_references( | |
276 | BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { | |
277 | gnttab_request_free_callback( | |
278 | &info->callback, | |
279 | blkif_restart_queue_callback, | |
280 | info, | |
281 | BLKIF_MAX_SEGMENTS_PER_REQUEST); | |
282 | return 1; | |
283 | } | |
284 | ||
285 | /* Fill out a communications ring structure. */ | |
286 | ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); | |
287 | id = get_id_from_freelist(info); | |
a945b980 | 288 | info->shadow[id].request = req; |
9f27ee59 | 289 | |
97e36834 | 290 | ring_req->u.rw.id = id; |
51de6952 | 291 | ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); |
97e36834 | 292 | ring_req->u.rw.handle = info->handle; |
9f27ee59 JF |
293 | |
294 | ring_req->operation = rq_data_dir(req) ? | |
295 | BLKIF_OP_WRITE : BLKIF_OP_READ; | |
be2f8373 JF |
296 | |
297 | if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { | |
298 | /* | |
edf6ef59 KRW |
299 | * Ideally we can do an unordered flush-to-disk. In case the |
300 | * backend only supports barriers, use that. A barrier request is |
be2f8373 JF |
301 | * a superset of FUA, so we can implement it the same |
302 | * way. (It's also a FLUSH+FUA, since it is | |
303 | * guaranteed ordered WRT previous writes.) | |
304 | */ | |
edf6ef59 | 305 | ring_req->operation = info->flush_op; |
be2f8373 | 306 | } |
9f27ee59 | 307 | |
ed30bf31 LD |
308 | if (unlikely(req->cmd_flags & REQ_DISCARD)) { |
309 | /* id, sector_number and handle are set above. */ | |
310 | ring_req->operation = BLKIF_OP_DISCARD; | |
97e36834 | 311 | ring_req->u.discard.nr_segments = 0; |
ed30bf31 LD |
312 | ring_req->u.discard.nr_sectors = blk_rq_sectors(req); |
313 | } else { | |
97e36834 KRW |
314 | ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req, |
315 | info->sg); | |
316 | BUG_ON(ring_req->u.rw.nr_segments > | |
317 | BLKIF_MAX_SEGMENTS_PER_REQUEST); | |
9e973e64 | 318 | |
97e36834 | 319 | for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) { |
ed30bf31 LD |
320 | buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg))); |
321 | fsect = sg->offset >> 9; | |
322 | lsect = fsect + (sg->length >> 9) - 1; | |
323 | /* install a grant reference. */ | |
324 | ref = gnttab_claim_grant_reference(&gref_head); | |
325 | BUG_ON(ref == -ENOSPC); | |
6c92e699 | 326 | |
ed30bf31 LD |
327 | gnttab_grant_foreign_access_ref( |
328 | ref, | |
329 | info->xbdev->otherend_id, | |
330 | buffer_mfn, | |
331 | rq_data_dir(req)); | |
332 | ||
333 | info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); | |
334 | ring_req->u.rw.seg[i] = | |
335 | (struct blkif_request_segment) { | |
336 | .gref = ref, | |
337 | .first_sect = fsect, | |
338 | .last_sect = lsect }; | |
339 | } | |
9f27ee59 JF |
340 | } |
341 | ||
342 | info->ring.req_prod_pvt++; | |
343 | ||
344 | /* Keep a private copy so we can reissue requests when recovering. */ | |
345 | info->shadow[id].req = *ring_req; | |
346 | ||
347 | gnttab_free_grant_references(gref_head); | |
348 | ||
349 | return 0; | |
350 | } | |
351 | ||
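Each segment entry in the ring names a grant reference plus the first and last 512-byte sectors actually used within that page, so partially filled pages are described exactly. A standalone sketch of the `fsect`/`lsect` shift arithmetic with illustrative values:

```c
#include <stdio.h>

int main(void)
{
    /* one scatterlist entry: 3 KiB starting 1 KiB into its page */
    unsigned int offset = 1024, length = 3072;

    unsigned int fsect = offset >> 9;                 /* first sector: 2 */
    unsigned int lsect = fsect + (length >> 9) - 1;   /* last sector:  7 */

    /* prints: fsect=2 lsect=7 (sectors 2..7 of the 8 in a 4 KiB page) */
    printf("fsect=%u lsect=%u\n", fsect, lsect);
    return 0;
}
```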
352 | ||
353 | static inline void flush_requests(struct blkfront_info *info) | |
354 | { | |
355 | int notify; | |
356 | ||
357 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); | |
358 | ||
359 | if (notify) | |
360 | notify_remote_via_irq(info->irq); | |
361 | } | |
362 | ||
363 | /* | |
364 | * do_blkif_request | |
365 | * service read and write requests queued on the block layer request queue |
366 | */ | |
165125e1 | 367 | static void do_blkif_request(struct request_queue *rq) |
9f27ee59 JF |
368 | { |
369 | struct blkfront_info *info = NULL; | |
370 | struct request *req; | |
371 | int queued; | |
372 | ||
373 | pr_debug("Entered do_blkif_request\n"); | |
374 | ||
375 | queued = 0; | |
376 | ||
9934c8c0 | 377 | while ((req = blk_peek_request(rq)) != NULL) { |
9f27ee59 | 378 | info = req->rq_disk->private_data; |
9f27ee59 JF |
379 | |
380 | if (RING_FULL(&info->ring)) | |
381 | goto wait; | |
382 | ||
9934c8c0 | 383 | blk_start_request(req); |
296b2f6a | 384 | |
d11e6158 KRW |
385 | if ((req->cmd_type != REQ_TYPE_FS) || |
386 | ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) && | |
387 | !info->flush_op)) { | |
296b2f6a TH |
388 | __blk_end_request_all(req, -EIO); |
389 | continue; | |
390 | } | |
391 | ||
9f27ee59 | 392 | pr_debug("do_blk_req %p: cmd %p, sec %lx, " |
83096ebf TH |
393 | "(%u/%u) buffer:%p [%s]\n", |
394 | req, req->cmd, (unsigned long)blk_rq_pos(req), | |
395 | blk_rq_cur_sectors(req), blk_rq_sectors(req), | |
396 | req->buffer, rq_data_dir(req) ? "write" : "read"); | |
9f27ee59 | 397 | |
9f27ee59 JF |
398 | if (blkif_queue_request(req)) { |
399 | blk_requeue_request(rq, req); | |
400 | wait: | |
401 | /* Avoid pointless unplugs. */ | |
402 | blk_stop_queue(rq); | |
403 | break; | |
404 | } | |
405 | ||
406 | queued++; | |
407 | } | |
408 | ||
409 | if (queued != 0) | |
410 | flush_requests(info); | |
411 | } | |
412 | ||
413 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) | |
414 | { | |
165125e1 | 415 | struct request_queue *rq; |
ed30bf31 | 416 | struct blkfront_info *info = gd->private_data; |
9f27ee59 JF |
417 | |
418 | rq = blk_init_queue(do_blkif_request, &blkif_io_lock); | |
419 | if (rq == NULL) | |
420 | return -1; | |
421 | ||
66d352e1 | 422 | queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); |
9f27ee59 | 423 | |
ed30bf31 LD |
424 | if (info->feature_discard) { |
425 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); | |
426 | blk_queue_max_discard_sectors(rq, get_capacity(gd)); | |
427 | rq->limits.discard_granularity = info->discard_granularity; | |
428 | rq->limits.discard_alignment = info->discard_alignment; | |
429 | } | |
430 | ||
9f27ee59 | 431 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ |
e1defc4f | 432 | blk_queue_logical_block_size(rq, sector_size); |
086fa5ff | 433 | blk_queue_max_hw_sectors(rq, 512); |
9f27ee59 JF |
434 | |
435 | /* Each segment in a request is up to an aligned page in size. */ | |
436 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); | |
437 | blk_queue_max_segment_size(rq, PAGE_SIZE); | |
438 | ||
439 | /* Ensure a merged request will fit in a single I/O ring slot. */ | |
8a78362c | 440 | blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); |
9f27ee59 JF |
441 | |
442 | /* Make sure buffer addresses are sector-aligned. */ | |
443 | blk_queue_dma_alignment(rq, 511); | |
444 | ||
1c91fe1a IC |
445 | /* Make sure we don't use bounce buffers. */ |
446 | blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); | |
447 | ||
9f27ee59 JF |
448 | gd->queue = rq; |
449 | ||
450 | return 0; | |
451 | } | |
452 | ||
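Together these limits guarantee that any request the block layer merges still fits one ring slot: at most `BLKIF_MAX_SEGMENTS_PER_REQUEST` segments, each confined to a single page. A quick sanity check of the resulting per-request ceiling, assuming 4 KiB pages and the classic blkif segment count of 11:

```c
#include <stdio.h>

#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11   /* classic blkif ABI value */
#define PAGE_SIZE 4096

int main(void)
{
    unsigned int max_bytes = BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE;

    /* prints: one request <= 45056 bytes (88 sectors) */
    printf("one request <= %u bytes (%u sectors)\n",
           max_bytes, max_bytes / 512);
    return 0;
}
```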
453 | ||
4913efe4 | 454 | static void xlvbd_flush(struct blkfront_info *info) |
9f27ee59 | 455 | { |
4913efe4 | 456 | blk_queue_flush(info->rq, info->feature_flush); |
edf6ef59 | 457 | printk(KERN_INFO "blkfront: %s: %s: %s\n", |
4913efe4 | 458 | info->gd->disk_name, |
edf6ef59 KRW |
459 | info->flush_op == BLKIF_OP_WRITE_BARRIER ? |
460 | "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? | |
461 | "flush diskcache" : "barrier or flush"), | |
4913efe4 | 462 | info->feature_flush ? "enabled" : "disabled"); |
9f27ee59 JF |
463 | } |
464 | ||
c80a4209 SS |
465 | static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) |
466 | { | |
467 | int major; | |
468 | major = BLKIF_MAJOR(vdevice); | |
469 | *minor = BLKIF_MINOR(vdevice); | |
470 | switch (major) { | |
471 | case XEN_IDE0_MAJOR: | |
472 | *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET; | |
473 | *minor = ((*minor / 64) * PARTS_PER_DISK) + | |
474 | EMULATED_HD_DISK_MINOR_OFFSET; | |
475 | break; | |
476 | case XEN_IDE1_MAJOR: | |
477 | *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET; | |
478 | *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) + | |
479 | EMULATED_HD_DISK_MINOR_OFFSET; | |
480 | break; | |
481 | case XEN_SCSI_DISK0_MAJOR: | |
482 | *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET; | |
483 | *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET; | |
484 | break; | |
485 | case XEN_SCSI_DISK1_MAJOR: | |
486 | case XEN_SCSI_DISK2_MAJOR: | |
487 | case XEN_SCSI_DISK3_MAJOR: | |
488 | case XEN_SCSI_DISK4_MAJOR: | |
489 | case XEN_SCSI_DISK5_MAJOR: | |
490 | case XEN_SCSI_DISK6_MAJOR: | |
491 | case XEN_SCSI_DISK7_MAJOR: | |
492 | *offset = (*minor / PARTS_PER_DISK) + | |
493 | ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) + | |
494 | EMULATED_SD_DISK_NAME_OFFSET; | |
495 | *minor = *minor + | |
496 | ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) + | |
497 | EMULATED_SD_DISK_MINOR_OFFSET; | |
498 | break; | |
499 | case XEN_SCSI_DISK8_MAJOR: | |
500 | case XEN_SCSI_DISK9_MAJOR: | |
501 | case XEN_SCSI_DISK10_MAJOR: | |
502 | case XEN_SCSI_DISK11_MAJOR: | |
503 | case XEN_SCSI_DISK12_MAJOR: | |
504 | case XEN_SCSI_DISK13_MAJOR: | |
505 | case XEN_SCSI_DISK14_MAJOR: | |
506 | case XEN_SCSI_DISK15_MAJOR: | |
507 | *offset = (*minor / PARTS_PER_DISK) + | |
508 | ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) + | |
509 | EMULATED_SD_DISK_NAME_OFFSET; | |
510 | *minor = *minor + | |
511 | ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) + | |
512 | EMULATED_SD_DISK_MINOR_OFFSET; | |
513 | break; | |
514 | case XENVBD_MAJOR: | |
515 | *offset = *minor / PARTS_PER_DISK; | |
516 | break; | |
517 | default: | |
518 | printk(KERN_WARNING "blkfront: your disk configuration is " | |
519 | "incorrect, please use an xvd device instead\n"); | |
520 | return -ENODEV; | |
521 | } | |
522 | return 0; | |
523 | } | |
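The emulated majors are repacked onto a 16-minor-per-disk layout under `XENVBD_MAJOR`, with the /dev name offset chosen to match. For example, hdb (first emulated IDE channel, minor 64) lands at offset 1 and minor 16, which the naming code below turns into xvdb. A standalone sketch of that one case, with the offsets assumed to match the definitions above:

```c
#include <stdio.h>

#define PARTS_PER_DISK 16
#define EMULATED_HD_DISK_MINOR_OFFSET 0
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)

int main(void)
{
    int minor = 64;   /* hdb: second disk on the first emulated IDE channel */

    unsigned int offset = (minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
    minor = ((minor / 64) * PARTS_PER_DISK) + EMULATED_HD_DISK_MINOR_OFFSET;

    /* prints: offset=1 minor=16  -> becomes xvdb, minors 16..31 */
    printf("offset=%u minor=%d\n", offset, minor);
    return 0;
}
```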
9f27ee59 | 524 | |
9246b5f0 CL |
525 | static int xlvbd_alloc_gendisk(blkif_sector_t capacity, |
526 | struct blkfront_info *info, | |
527 | u16 vdisk_info, u16 sector_size) | |
9f27ee59 JF |
528 | { |
529 | struct gendisk *gd; | |
530 | int nr_minors = 1; | |
c80a4209 | 531 | int err; |
9246b5f0 CL |
532 | unsigned int offset; |
533 | int minor; | |
534 | int nr_parts; | |
9f27ee59 JF |
535 | |
536 | BUG_ON(info->gd != NULL); | |
537 | BUG_ON(info->rq != NULL); | |
538 | ||
9246b5f0 CL |
539 | if ((info->vdevice>>EXT_SHIFT) > 1) { |
540 | /* this is above the extended range; something is wrong */ | |
541 | printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice); | |
542 | return -ENODEV; | |
543 | } | |
544 | ||
545 | if (!VDEV_IS_EXTENDED(info->vdevice)) { | |
c80a4209 SS |
546 | err = xen_translate_vdev(info->vdevice, &minor, &offset); |
547 | if (err) | |
548 | return err; | |
549 | nr_parts = PARTS_PER_DISK; | |
9246b5f0 CL |
550 | } else { |
551 | minor = BLKIF_MINOR_EXT(info->vdevice); | |
552 | nr_parts = PARTS_PER_EXT_DISK; | |
c80a4209 | 553 | offset = minor / nr_parts; |
89153b5c | 554 | if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4) |
c80a4209 SS |
555 | printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " |
556 | "emulated IDE disks,\n\t choose an xvd device name" | |
557 | "from xvde on\n", info->vdevice); | |
9246b5f0 | 558 | } |
c80a4209 | 559 | err = -ENODEV; |
9246b5f0 CL |
560 | |
561 | if ((minor % nr_parts) == 0) | |
562 | nr_minors = nr_parts; | |
9f27ee59 | 563 | |
0e345826 JB |
564 | err = xlbd_reserve_minors(minor, nr_minors); |
565 | if (err) | |
566 | goto out; | |
567 | err = -ENODEV; | |
568 | ||
9f27ee59 JF |
569 | gd = alloc_disk(nr_minors); |
570 | if (gd == NULL) | |
0e345826 | 571 | goto release; |
9f27ee59 | 572 | |
9246b5f0 CL |
573 | if (nr_minors > 1) { |
574 | if (offset < 26) | |
575 | sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset); | |
576 | else | |
577 | sprintf(gd->disk_name, "%s%c%c", DEV_NAME, | |
578 | 'a' + ((offset / 26)-1), 'a' + (offset % 26)); | |
579 | } else { | |
580 | if (offset < 26) | |
581 | sprintf(gd->disk_name, "%s%c%d", DEV_NAME, | |
582 | 'a' + offset, | |
583 | minor & (nr_parts - 1)); | |
584 | else | |
585 | sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME, | |
586 | 'a' + ((offset / 26) - 1), | |
587 | 'a' + (offset % 26), | |
588 | minor & (nr_parts - 1)); | |
589 | } | |
9f27ee59 JF |
590 | |
591 | gd->major = XENVBD_MAJOR; | |
592 | gd->first_minor = minor; | |
593 | gd->fops = &xlvbd_block_fops; | |
594 | gd->private_data = info; | |
595 | gd->driverfs_dev = &(info->xbdev->dev); | |
596 | set_capacity(gd, capacity); | |
597 | ||
598 | if (xlvbd_init_blk_queue(gd, sector_size)) { | |
599 | del_gendisk(gd); | |
0e345826 | 600 | goto release; |
9f27ee59 JF |
601 | } |
602 | ||
603 | info->rq = gd->queue; | |
604 | info->gd = gd; | |
605 | ||
4913efe4 | 606 | xlvbd_flush(info); |
9f27ee59 JF |
607 | |
608 | if (vdisk_info & VDISK_READONLY) | |
609 | set_disk_ro(gd, 1); | |
610 | ||
611 | if (vdisk_info & VDISK_REMOVABLE) | |
612 | gd->flags |= GENHD_FL_REMOVABLE; | |
613 | ||
614 | if (vdisk_info & VDISK_CDROM) | |
615 | gd->flags |= GENHD_FL_CD; | |
616 | ||
617 | return 0; | |
618 | ||
0e345826 JB |
619 | release: |
620 | xlbd_release_minors(minor, nr_minors); | |
9f27ee59 JF |
621 | out: |
622 | return err; | |
623 | } | |
624 | ||
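The naming logic mirrors the sd convention: offsets 0-25 yield xvda through xvdz, and larger offsets grow a second letter, so offset 26 becomes xvdaa. A standalone sketch of the whole-disk branch of the `sprintf` logic:

```c
#include <stdio.h>

#define DEV_NAME "xvd"

static void disk_name(unsigned int offset, char *buf)
{
    if (offset < 26)
        sprintf(buf, "%s%c", DEV_NAME, 'a' + offset);
    else
        sprintf(buf, "%s%c%c", DEV_NAME,
                'a' + ((offset / 26) - 1), 'a' + (offset % 26));
}

int main(void)
{
    char name[8];
    unsigned int offsets[] = { 0, 25, 26, 27 };

    /* prints: xvda xvdz xvdaa xvdab */
    for (unsigned int i = 0; i < 4; i++) {
        disk_name(offsets[i], name);
        printf("%s ", name);
    }
    printf("\n");
    return 0;
}
```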
a66b5aeb DS |
625 | static void xlvbd_release_gendisk(struct blkfront_info *info) |
626 | { | |
627 | unsigned int minor, nr_minors; | |
628 | unsigned long flags; | |
629 | ||
630 | if (info->rq == NULL) | |
631 | return; | |
632 | ||
633 | spin_lock_irqsave(&blkif_io_lock, flags); | |
634 | ||
635 | /* No more blkif_request(). */ | |
636 | blk_stop_queue(info->rq); | |
637 | ||
638 | /* No more gnttab callback work. */ | |
639 | gnttab_cancel_free_callback(&info->callback); | |
640 | spin_unlock_irqrestore(&blkif_io_lock, flags); | |
641 | ||
642 | /* Flush gnttab callback work. Must be done with no locks held. */ | |
30d65030 | 643 | flush_work_sync(&info->work); |
a66b5aeb DS |
644 | |
645 | del_gendisk(info->gd); | |
646 | ||
647 | minor = info->gd->first_minor; | |
648 | nr_minors = info->gd->minors; | |
649 | xlbd_release_minors(minor, nr_minors); | |
650 | ||
651 | blk_cleanup_queue(info->rq); | |
652 | info->rq = NULL; | |
653 | ||
654 | put_disk(info->gd); | |
655 | info->gd = NULL; | |
656 | } | |
657 | ||
9f27ee59 JF |
658 | static void kick_pending_request_queues(struct blkfront_info *info) |
659 | { | |
660 | if (!RING_FULL(&info->ring)) { | |
661 | /* Re-enable calldowns. */ | |
662 | blk_start_queue(info->rq); | |
663 | /* Kick things off immediately. */ | |
664 | do_blkif_request(info->rq); | |
665 | } | |
666 | } | |
667 | ||
668 | static void blkif_restart_queue(struct work_struct *work) | |
669 | { | |
670 | struct blkfront_info *info = container_of(work, struct blkfront_info, work); | |
671 | ||
672 | spin_lock_irq(&blkif_io_lock); | |
673 | if (info->connected == BLKIF_STATE_CONNECTED) | |
674 | kick_pending_request_queues(info); | |
675 | spin_unlock_irq(&blkif_io_lock); | |
676 | } | |
677 | ||
678 | static void blkif_free(struct blkfront_info *info, int suspend) | |
679 | { | |
680 | /* Prevent new requests being issued until we fix things up. */ | |
681 | spin_lock_irq(&blkif_io_lock); | |
682 | info->connected = suspend ? | |
683 | BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; | |
684 | /* No more blkif_request(). */ | |
685 | if (info->rq) | |
686 | blk_stop_queue(info->rq); | |
687 | /* No more gnttab callback work. */ | |
688 | gnttab_cancel_free_callback(&info->callback); | |
689 | spin_unlock_irq(&blkif_io_lock); | |
690 | ||
691 | /* Flush gnttab callback work. Must be done with no locks held. */ | |
30d65030 | 692 | flush_work_sync(&info->work); |
9f27ee59 JF |
693 | |
694 | /* Free resources associated with old device channel. */ | |
695 | if (info->ring_ref != GRANT_INVALID_REF) { | |
696 | gnttab_end_foreign_access(info->ring_ref, 0, | |
697 | (unsigned long)info->ring.sring); | |
698 | info->ring_ref = GRANT_INVALID_REF; | |
699 | info->ring.sring = NULL; | |
700 | } | |
701 | if (info->irq) | |
702 | unbind_from_irqhandler(info->irq, info); | |
703 | info->evtchn = info->irq = 0; | |
704 | ||
705 | } | |
706 | ||
707 | static void blkif_completion(struct blk_shadow *s) | |
708 | { | |
709 | int i; | |
97e36834 | 710 | for (i = 0; i < s->req.u.rw.nr_segments; i++) |
51de6952 | 711 | gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL); |
9f27ee59 JF |
712 | } |
713 | ||
714 | static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |
715 | { | |
716 | struct request *req; | |
717 | struct blkif_response *bret; | |
718 | RING_IDX i, rp; | |
719 | unsigned long flags; | |
720 | struct blkfront_info *info = (struct blkfront_info *)dev_id; | |
f530f036 | 721 | int error; |
9f27ee59 JF |
722 | |
723 | spin_lock_irqsave(&blkif_io_lock, flags); | |
724 | ||
725 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { | |
726 | spin_unlock_irqrestore(&blkif_io_lock, flags); | |
727 | return IRQ_HANDLED; | |
728 | } | |
729 | ||
730 | again: | |
731 | rp = info->ring.sring->rsp_prod; | |
732 | rmb(); /* Ensure we see queued responses up to 'rp'. */ | |
733 | ||
734 | for (i = info->ring.rsp_cons; i != rp; i++) { | |
735 | unsigned long id; | |
9f27ee59 JF |
736 | |
737 | bret = RING_GET_RESPONSE(&info->ring, i); | |
738 | id = bret->id; | |
a945b980 | 739 | req = info->shadow[id].request; |
9f27ee59 JF |
740 | |
741 | blkif_completion(&info->shadow[id]); | |
742 | ||
743 | add_id_to_freelist(info, id); | |
744 | ||
f530f036 | 745 | error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; |
9f27ee59 | 746 | switch (bret->operation) { |
ed30bf31 LD |
747 | case BLKIF_OP_DISCARD: |
748 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { | |
749 | struct request_queue *rq = info->rq; | |
750 | printk(KERN_WARNING "blkfront: %s: discard op failed\n", | |
751 | info->gd->disk_name); | |
752 | error = -EOPNOTSUPP; | |
753 | info->feature_discard = 0; | |
ed30bf31 | 754 | queue_flag_clear(QUEUE_FLAG_DISCARD, rq); |
ed30bf31 LD |
755 | } |
756 | __blk_end_request_all(req, error); | |
757 | break; | |
edf6ef59 | 758 | case BLKIF_OP_FLUSH_DISKCACHE: |
9f27ee59 JF |
759 | case BLKIF_OP_WRITE_BARRIER: |
760 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { | |
edf6ef59 KRW |
761 | printk(KERN_WARNING "blkfront: %s: write %s op failed\n", |
762 | info->flush_op == BLKIF_OP_WRITE_BARRIER ? | |
763 | "barrier" : "flush disk cache", | |
9f27ee59 | 764 | info->gd->disk_name); |
f530f036 | 765 | error = -EOPNOTSUPP; |
dcb8baec JF |
766 | } |
767 | if (unlikely(bret->status == BLKIF_RSP_ERROR && | |
97e36834 | 768 | info->shadow[id].req.u.rw.nr_segments == 0)) { |
edf6ef59 KRW |
769 | printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n", |
770 | info->flush_op == BLKIF_OP_WRITE_BARRIER ? | |
771 | "barrier" : "flush disk cache", | |
dcb8baec JF |
772 | info->gd->disk_name); |
773 | error = -EOPNOTSUPP; | |
774 | } | |
775 | if (unlikely(error)) { | |
776 | if (error == -EOPNOTSUPP) | |
777 | error = 0; | |
4913efe4 | 778 | info->feature_flush = 0; |
edf6ef59 | 779 | info->flush_op = 0; |
4913efe4 | 780 | xlvbd_flush(info); |
9f27ee59 JF |
781 | } |
782 | /* fall through */ | |
783 | case BLKIF_OP_READ: | |
784 | case BLKIF_OP_WRITE: | |
785 | if (unlikely(bret->status != BLKIF_RSP_OKAY)) | |
786 | dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " | |
787 | "request: %x\n", bret->status); | |
788 | ||
40cbbb78 | 789 | __blk_end_request_all(req, error); |
9f27ee59 JF |
790 | break; |
791 | default: | |
792 | BUG(); | |
793 | } | |
794 | } | |
795 | ||
796 | info->ring.rsp_cons = i; | |
797 | ||
798 | if (i != info->ring.req_prod_pvt) { | |
799 | int more_to_do; | |
800 | RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); | |
801 | if (more_to_do) | |
802 | goto again; | |
803 | } else | |
804 | info->ring.sring->rsp_event = i + 1; | |
805 | ||
806 | kick_pending_request_queues(info); | |
807 | ||
808 | spin_unlock_irqrestore(&blkif_io_lock, flags); | |
809 | ||
810 | return IRQ_HANDLED; | |
811 | } | |
812 | ||
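`blkif_interrupt()` follows the standard Xen shared-ring consumer discipline: snapshot `rsp_prod`, consume up to it, then either loop if the producer advanced meanwhile or arm `rsp_event` so the backend interrupts only for responses past the cursor. A simplified single-threaded sketch of that index handling (the real `RING_FINAL_CHECK_FOR_RESPONSES` macro additionally sets `rsp_event` before its re-check to close the wakeup race):

```c
#include <stdio.h>

#define RING_SIZE 32                    /* power of two, like the real ring */
#define IDX(i) ((i) & (RING_SIZE - 1))  /* free-running index -> slot */

struct ring {
    unsigned int rsp_prod;   /* advanced by the backend */
    unsigned int rsp_cons;   /* private consumer cursor */
    unsigned int rsp_event;  /* backend interrupts when rsp_prod reaches this */
    int rsp[RING_SIZE];
};

static void consume(struct ring *r)
{
    unsigned int rp, i;
again:
    rp = r->rsp_prod;        /* snapshot; the kernel pairs this with rmb() */

    for (i = r->rsp_cons; i != rp; i++)
        printf("response %d from slot %u\n", r->rsp[IDX(i)], IDX(i));
    r->rsp_cons = rp;

    if (rp != r->rsp_prod)   /* producer moved meanwhile: consume again */
        goto again;
    r->rsp_event = rp + 1;   /* otherwise arm the next interrupt */
}

int main(void)
{
    struct ring r = { .rsp_prod = 2, .rsp = { 42, 43 } };

    consume(&r);   /* prints responses 42 and 43, then sets rsp_event = 3 */
    return 0;
}
```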
813 | ||
814 | static int setup_blkring(struct xenbus_device *dev, | |
815 | struct blkfront_info *info) | |
816 | { | |
817 | struct blkif_sring *sring; | |
818 | int err; | |
819 | ||
820 | info->ring_ref = GRANT_INVALID_REF; | |
821 | ||
a144ff09 | 822 | sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH); |
9f27ee59 JF |
823 | if (!sring) { |
824 | xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); | |
825 | return -ENOMEM; | |
826 | } | |
827 | SHARED_RING_INIT(sring); | |
828 | FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); | |
9e973e64 JA |
829 | |
830 | sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); | |
9f27ee59 JF |
831 | |
832 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); | |
833 | if (err < 0) { | |
834 | free_page((unsigned long)sring); | |
835 | info->ring.sring = NULL; | |
836 | goto fail; | |
837 | } | |
838 | info->ring_ref = err; | |
839 | ||
840 | err = xenbus_alloc_evtchn(dev, &info->evtchn); | |
841 | if (err) | |
842 | goto fail; | |
843 | ||
844 | err = bind_evtchn_to_irqhandler(info->evtchn, | |
845 | blkif_interrupt, | |
846 | IRQF_SAMPLE_RANDOM, "blkif", info); | |
847 | if (err <= 0) { | |
848 | xenbus_dev_fatal(dev, err, | |
849 | "bind_evtchn_to_irqhandler failed"); | |
850 | goto fail; | |
851 | } | |
852 | info->irq = err; | |
853 | ||
854 | return 0; | |
855 | fail: | |
856 | blkif_free(info, 0); | |
857 | return err; | |
858 | } | |
859 | ||
860 | ||
861 | /* Common code used when first setting up, and when resuming. */ | |
203fd61f | 862 | static int talk_to_blkback(struct xenbus_device *dev, |
9f27ee59 JF |
863 | struct blkfront_info *info) |
864 | { | |
865 | const char *message = NULL; | |
866 | struct xenbus_transaction xbt; | |
867 | int err; | |
868 | ||
869 | /* Create shared ring, alloc event channel. */ | |
870 | err = setup_blkring(dev, info); | |
871 | if (err) | |
872 | goto out; | |
873 | ||
874 | again: | |
875 | err = xenbus_transaction_start(&xbt); | |
876 | if (err) { | |
877 | xenbus_dev_fatal(dev, err, "starting transaction"); | |
878 | goto destroy_blkring; | |
879 | } | |
880 | ||
881 | err = xenbus_printf(xbt, dev->nodename, | |
882 | "ring-ref", "%u", info->ring_ref); | |
883 | if (err) { | |
884 | message = "writing ring-ref"; | |
885 | goto abort_transaction; | |
886 | } | |
887 | err = xenbus_printf(xbt, dev->nodename, | |
888 | "event-channel", "%u", info->evtchn); | |
889 | if (err) { | |
890 | message = "writing event-channel"; | |
891 | goto abort_transaction; | |
892 | } | |
3e334239 MA |
893 | err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", |
894 | XEN_IO_PROTO_ABI_NATIVE); | |
895 | if (err) { | |
896 | message = "writing protocol"; | |
897 | goto abort_transaction; | |
898 | } | |
9f27ee59 JF |
899 | |
900 | err = xenbus_transaction_end(xbt, 0); | |
901 | if (err) { | |
902 | if (err == -EAGAIN) | |
903 | goto again; | |
904 | xenbus_dev_fatal(dev, err, "completing transaction"); | |
905 | goto destroy_blkring; | |
906 | } | |
907 | ||
908 | xenbus_switch_state(dev, XenbusStateInitialised); | |
909 | ||
910 | return 0; | |
911 | ||
912 | abort_transaction: | |
913 | xenbus_transaction_end(xbt, 1); | |
914 | if (message) | |
915 | xenbus_dev_fatal(dev, err, "%s", message); | |
916 | destroy_blkring: | |
917 | blkif_free(info, 0); | |
918 | out: | |
919 | return err; | |
920 | } | |
921 | ||
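On success the transaction leaves the connection parameters in the frontend's xenstore directory for the backend to pick up; with a traditional xvda device (vdevice 51712, i.e. XENVBD_MAJOR 202 shifted into the major byte) the nodes would look something like this (illustrative values, 64-bit guest assumed):

```
device/vbd/51712/ring-ref      = "8"
device/vbd/51712/event-channel = "37"
device/vbd/51712/protocol      = "x86_64-abi"
```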
9f27ee59 JF |
922 | /** |
923 | * Entry point to this code when a new device is created. Allocate the basic | |
924 | * structures and the ring buffer for communication with the backend, and | |
925 | * inform the backend of the appropriate details for those. Switch to | |
926 | * Initialised state. | |
927 | */ | |
928 | static int blkfront_probe(struct xenbus_device *dev, | |
929 | const struct xenbus_device_id *id) | |
930 | { | |
931 | int err, vdevice, i; | |
932 | struct blkfront_info *info; | |
933 | ||
934 | /* FIXME: Use dynamic device id if this is not set. */ | |
935 | err = xenbus_scanf(XBT_NIL, dev->nodename, | |
936 | "virtual-device", "%i", &vdevice); | |
937 | if (err != 1) { | |
9246b5f0 CL |
938 | /* go looking in the extended area instead */ |
939 | err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", | |
940 | "%i", &vdevice); | |
941 | if (err != 1) { | |
942 | xenbus_dev_fatal(dev, err, "reading virtual-device"); | |
943 | return err; | |
944 | } | |
9f27ee59 JF |
945 | } |
946 | ||
b98a409b SS |
947 | if (xen_hvm_domain()) { |
948 | char *type; | |
949 | int len; | |
950 | /* no unplug has been done: do not hook devices != xen vbds */ | |
1dc7ce99 | 951 | if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) { |
b98a409b SS |
952 | int major; |
953 | ||
954 | if (!VDEV_IS_EXTENDED(vdevice)) | |
955 | major = BLKIF_MAJOR(vdevice); | |
956 | else | |
957 | major = XENVBD_MAJOR; | |
958 | ||
959 | if (major != XENVBD_MAJOR) { | |
960 | printk(KERN_INFO | |
961 | "%s: HVM does not support vbd %d as xen block device\n", | |
962 | __func__, vdevice); |
963 | return -ENODEV; | |
964 | } | |
965 | } | |
966 | /* do not create a PV cdrom device if we are an HVM guest */ | |
967 | type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len); | |
968 | if (IS_ERR(type)) | |
969 | return -ENODEV; | |
970 | if (strncmp(type, "cdrom", 5) == 0) { | |
971 | kfree(type); | |
c1c5413a SS |
972 | return -ENODEV; |
973 | } | |
b98a409b | 974 | kfree(type); |
c1c5413a | 975 | } |
9f27ee59 JF |
976 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
977 | if (!info) { | |
978 | xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); | |
979 | return -ENOMEM; | |
980 | } | |
981 | ||
b70f5fa0 | 982 | mutex_init(&info->mutex); |
9f27ee59 JF |
983 | info->xbdev = dev; |
984 | info->vdevice = vdevice; | |
985 | info->connected = BLKIF_STATE_DISCONNECTED; | |
986 | INIT_WORK(&info->work, blkif_restart_queue); | |
987 | ||
988 | for (i = 0; i < BLK_RING_SIZE; i++) | |
97e36834 KRW |
989 | info->shadow[i].req.u.rw.id = i+1; |
990 | info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff; | |
9f27ee59 JF |
991 | |
992 | /* Front end dir is a number, which is used as the id. */ | |
993 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); | |
a1b4b12b | 994 | dev_set_drvdata(&dev->dev, info); |
9f27ee59 | 995 | |
203fd61f | 996 | err = talk_to_blkback(dev, info); |
9f27ee59 JF |
997 | if (err) { |
998 | kfree(info); | |
a1b4b12b | 999 | dev_set_drvdata(&dev->dev, NULL); |
9f27ee59 JF |
1000 | return err; |
1001 | } | |
1002 | ||
1003 | return 0; | |
1004 | } | |
1005 | ||
1006 | ||
1007 | static int blkif_recover(struct blkfront_info *info) | |
1008 | { | |
1009 | int i; | |
1010 | struct blkif_request *req; | |
1011 | struct blk_shadow *copy; | |
1012 | int j; | |
1013 | ||
1014 | /* Stage 1: Make a safe copy of the shadow state. */ | |
a144ff09 IC |
1015 | copy = kmalloc(sizeof(info->shadow), |
1016 | GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); | |
9f27ee59 JF |
1017 | if (!copy) |
1018 | return -ENOMEM; | |
1019 | memcpy(copy, info->shadow, sizeof(info->shadow)); | |
1020 | ||
1021 | /* Stage 2: Set up free list. */ | |
1022 | memset(&info->shadow, 0, sizeof(info->shadow)); | |
1023 | for (i = 0; i < BLK_RING_SIZE; i++) | |
97e36834 | 1024 | info->shadow[i].req.u.rw.id = i+1; |
9f27ee59 | 1025 | info->shadow_free = info->ring.req_prod_pvt; |
97e36834 | 1026 | info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff; |
9f27ee59 JF |
1027 | |
1028 | /* Stage 3: Find pending requests and requeue them. */ | |
1029 | for (i = 0; i < BLK_RING_SIZE; i++) { | |
1030 | /* Not in use? */ | |
a945b980 | 1031 | if (!copy[i].request) |
9f27ee59 JF |
1032 | continue; |
1033 | ||
1034 | /* Grab a request slot and copy shadow state into it. */ | |
1035 | req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); | |
1036 | *req = copy[i].req; | |
1037 | ||
1038 | /* We get a new request id, and must reset the shadow state. */ | |
97e36834 KRW |
1039 | req->u.rw.id = get_id_from_freelist(info); |
1040 | memcpy(&info->shadow[req->u.rw.id], ©[i], sizeof(copy[i])); | |
9f27ee59 JF |
1041 | |
1042 | /* Rewrite any grant references invalidated by susp/resume. */ | |
97e36834 | 1043 | for (j = 0; j < req->u.rw.nr_segments; j++) |
9f27ee59 | 1044 | gnttab_grant_foreign_access_ref( |
51de6952 | 1045 | req->u.rw.seg[j].gref, |
9f27ee59 | 1046 | info->xbdev->otherend_id, |
97e36834 KRW |
1047 | pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), |
1048 | rq_data_dir(info->shadow[req->u.rw.id].request)); | |
1049 | info->shadow[req->u.rw.id].req = *req; | |
9f27ee59 JF |
1050 | |
1051 | info->ring.req_prod_pvt++; | |
1052 | } | |
1053 | ||
1054 | kfree(copy); | |
1055 | ||
1056 | xenbus_switch_state(info->xbdev, XenbusStateConnected); | |
1057 | ||
1058 | spin_lock_irq(&blkif_io_lock); | |
1059 | ||
1060 | /* Now safe for us to use the shared ring */ | |
1061 | info->connected = BLKIF_STATE_CONNECTED; | |
1062 | ||
1063 | /* Send off requeued requests */ | |
1064 | flush_requests(info); | |
1065 | ||
1066 | /* Kick any other new requests queued since we resumed */ | |
1067 | kick_pending_request_queues(info); | |
1068 | ||
1069 | spin_unlock_irq(&blkif_io_lock); | |
1070 | ||
1071 | return 0; | |
1072 | } | |
1073 | ||
1074 | /** | |
1075 | * We are reconnecting to the backend, due to a suspend/resume, or a backend | |
1076 | * driver restart. We tear down our blkif structure and recreate it, but | |
1077 | * leave the device-layer structures intact so that this is transparent to the | |
1078 | * rest of the kernel. | |
1079 | */ | |
1080 | static int blkfront_resume(struct xenbus_device *dev) | |
1081 | { | |
a1b4b12b | 1082 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
9f27ee59 JF |
1083 | int err; |
1084 | ||
1085 | dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); | |
1086 | ||
1087 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); | |
1088 | ||
203fd61f | 1089 | err = talk_to_blkback(dev, info); |
9f27ee59 JF |
1090 | if (info->connected == BLKIF_STATE_SUSPENDED && !err) |
1091 | err = blkif_recover(info); | |
1092 | ||
1093 | return err; | |
1094 | } | |
1095 | ||
b70f5fa0 DS |
1096 | static void |
1097 | blkfront_closing(struct blkfront_info *info) | |
1098 | { | |
1099 | struct xenbus_device *xbdev = info->xbdev; | |
1100 | struct block_device *bdev = NULL; | |
1101 | ||
1102 | mutex_lock(&info->mutex); | |
1103 | ||
1104 | if (xbdev->state == XenbusStateClosing) { | |
1105 | mutex_unlock(&info->mutex); | |
1106 | return; | |
1107 | } | |
1108 | ||
1109 | if (info->gd) | |
1110 | bdev = bdget_disk(info->gd, 0); | |
1111 | ||
1112 | mutex_unlock(&info->mutex); | |
1113 | ||
1114 | if (!bdev) { | |
1115 | xenbus_frontend_closed(xbdev); | |
1116 | return; | |
1117 | } | |
1118 | ||
1119 | mutex_lock(&bdev->bd_mutex); | |
1120 | ||
7b32d104 | 1121 | if (bdev->bd_openers) { |
b70f5fa0 DS |
1122 | xenbus_dev_error(xbdev, -EBUSY, |
1123 | "Device in use; refusing to close"); | |
1124 | xenbus_switch_state(xbdev, XenbusStateClosing); | |
1125 | } else { | |
1126 | xlvbd_release_gendisk(info); | |
1127 | xenbus_frontend_closed(xbdev); | |
1128 | } | |
1129 | ||
1130 | mutex_unlock(&bdev->bd_mutex); | |
1131 | bdput(bdev); | |
1132 | } | |
9f27ee59 | 1133 | |
ed30bf31 LD |
1134 | static void blkfront_setup_discard(struct blkfront_info *info) |
1135 | { | |
1136 | int err; | |
1137 | char *type; | |
1138 | unsigned int discard_granularity; | |
1139 | unsigned int discard_alignment; | |
1140 | ||
1141 | type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL); | |
1142 | if (IS_ERR(type)) | |
1143 | return; | |
1144 | ||
1145 | if (strncmp(type, "phy", 3) == 0) { | |
1146 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | |
1147 | "discard-granularity", "%u", &discard_granularity, | |
1148 | "discard-alignment", "%u", &discard_alignment, | |
1149 | NULL); | |
1150 | if (!err) { | |
1151 | info->feature_discard = 1; | |
1152 | info->discard_granularity = discard_granularity; | |
1153 | info->discard_alignment = discard_alignment; | |
1154 | } | |
1155 | } else if (strncmp(type, "file", 4) == 0) | |
1156 | info->feature_discard = 1; | |
1157 | ||
1158 | kfree(type); | |
1159 | } | |
1160 | ||
9f27ee59 JF |
1161 | /* |
1162 | * Invoked when the backend is finally 'ready' (and has produced |
1163 | * the details about the physical device - #sectors, size, etc). | |
1164 | */ | |
1165 | static void blkfront_connect(struct blkfront_info *info) | |
1166 | { | |
1167 | unsigned long long sectors; | |
1168 | unsigned long sector_size; | |
1169 | unsigned int binfo; | |
1170 | int err; | |
ed30bf31 | 1171 | int barrier, flush, discard; |
9f27ee59 | 1172 | |
1fa73be6 S |
1173 | switch (info->connected) { |
1174 | case BLKIF_STATE_CONNECTED: | |
1175 | /* | |
1176 | * Potentially, the back-end may be signalling | |
1177 | * a capacity change; update the capacity. | |
1178 | */ | |
1179 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, | |
1180 | "sectors", "%Lu", §ors); | |
1181 | if (XENBUS_EXIST_ERR(err)) | |
1182 | return; | |
1183 | printk(KERN_INFO "Setting capacity to %Lu\n", | |
1184 | sectors); | |
1185 | set_capacity(info->gd, sectors); | |
2def141e | 1186 | revalidate_disk(info->gd); |
1fa73be6 S |
1187 | |
1188 | /* fall through */ | |
1189 | case BLKIF_STATE_SUSPENDED: | |
9f27ee59 JF |
1190 | return; |
1191 | ||
b4dddb49 JF |
1192 | default: |
1193 | break; | |
1fa73be6 | 1194 | } |
9f27ee59 JF |
1195 | |
1196 | dev_dbg(&info->xbdev->dev, "%s:%s.\n", | |
1197 | __func__, info->xbdev->otherend); | |
1198 | ||
1199 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | |
1200 | "sectors", "%llu", §ors, | |
1201 | "info", "%u", &binfo, | |
1202 | "sector-size", "%lu", §or_size, | |
1203 | NULL); | |
1204 | if (err) { | |
1205 | xenbus_dev_fatal(info->xbdev, err, | |
1206 | "reading backend fields at %s", | |
1207 | info->xbdev->otherend); | |
1208 | return; | |
1209 | } | |
1210 | ||
edf6ef59 KRW |
1211 | info->feature_flush = 0; |
1212 | info->flush_op = 0; | |
1213 | ||
9f27ee59 | 1214 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
4352b47a | 1215 | "feature-barrier", "%d", &barrier, |
9f27ee59 | 1216 | NULL); |
7901d141 JF |
1217 | |
1218 | /* | |
1219 | * If there's no "feature-barrier" defined, then it means | |
1220 | * we're dealing with a very old backend which writes | |
4913efe4 | 1221 | * synchronously; nothing to do. |
7901d141 | 1222 | * |
6958f145 | 1223 | * If there are barriers, then we use flush. |
7901d141 | 1224 | */ |
edf6ef59 | 1225 | if (!err && barrier) { |
be2f8373 | 1226 | info->feature_flush = REQ_FLUSH | REQ_FUA; |
edf6ef59 KRW |
1227 | info->flush_op = BLKIF_OP_WRITE_BARRIER; |
1228 | } | |
1229 | /* | |
1230 | * And if there is "feature-flush-cache", prefer it over |
1231 | * barriers. | |
1232 | */ | |
1233 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | |
1234 | "feature-flush-cache", "%d", &flush, | |
1235 | NULL); | |
9f27ee59 | 1236 | |
edf6ef59 KRW |
1237 | if (!err && flush) { |
1238 | info->feature_flush = REQ_FLUSH; | |
1239 | info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; | |
1240 | } | |
ed30bf31 LD |
1241 | |
1242 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | |
1243 | "feature-discard", "%d", &discard, | |
1244 | NULL); | |
1245 | ||
1246 | if (!err && discard) | |
1247 | blkfront_setup_discard(info); | |
1248 | ||
9246b5f0 | 1249 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); |
9f27ee59 JF |
1250 | if (err) { |
1251 | xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", | |
1252 | info->xbdev->otherend); | |
1253 | return; | |
1254 | } | |
1255 | ||
1256 | xenbus_switch_state(info->xbdev, XenbusStateConnected); | |
1257 | ||
1258 | /* Kick pending requests. */ | |
1259 | spin_lock_irq(&blkif_io_lock); | |
1260 | info->connected = BLKIF_STATE_CONNECTED; | |
1261 | kick_pending_request_queues(info); | |
1262 | spin_unlock_irq(&blkif_io_lock); | |
1263 | ||
1264 | add_disk(info->gd); | |
1d78d705 CL |
1265 | |
1266 | info->is_ready = 1; | |
9f27ee59 JF |
1267 | } |
1268 | ||
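The feature negotiation above reduces to a small preference table; summarizing the three possible outcomes:

Backend advertises | flush_op | feature_flush |
---|---|---|
neither | 0 (very old backend; writes are synchronous) | 0 |
feature-barrier only | BLKIF_OP_WRITE_BARRIER | REQ_FLUSH \| REQ_FUA |
feature-flush-cache | BLKIF_OP_FLUSH_DISKCACHE | REQ_FLUSH |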
9f27ee59 JF |
1269 | /** |
1270 | * Callback received when the backend's state changes. | |
1271 | */ | |
203fd61f | 1272 | static void blkback_changed(struct xenbus_device *dev, |
9f27ee59 JF |
1273 | enum xenbus_state backend_state) |
1274 | { | |
a1b4b12b | 1275 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
9f27ee59 | 1276 | |
203fd61f | 1277 | dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state); |
9f27ee59 JF |
1278 | |
1279 | switch (backend_state) { | |
1280 | case XenbusStateInitialising: | |
1281 | case XenbusStateInitWait: | |
1282 | case XenbusStateInitialised: | |
b78c9512 NI |
1283 | case XenbusStateReconfiguring: |
1284 | case XenbusStateReconfigured: | |
9f27ee59 JF |
1285 | case XenbusStateUnknown: |
1286 | case XenbusStateClosed: | |
1287 | break; | |
1288 | ||
1289 | case XenbusStateConnected: | |
1290 | blkfront_connect(info); | |
1291 | break; | |
1292 | ||
1293 | case XenbusStateClosing: | |
b70f5fa0 | 1294 | blkfront_closing(info); |
9f27ee59 JF |
1295 | break; |
1296 | } | |
1297 | } | |
1298 | ||
fa1bd359 | 1299 | static int blkfront_remove(struct xenbus_device *xbdev) |
9f27ee59 | 1300 | { |
fa1bd359 DS |
1301 | struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); |
1302 | struct block_device *bdev = NULL; | |
1303 | struct gendisk *disk; | |
9f27ee59 | 1304 | |
fa1bd359 | 1305 | dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); |
9f27ee59 JF |
1306 | |
1307 | blkif_free(info, 0); | |
1308 | ||
fa1bd359 DS |
1309 | mutex_lock(&info->mutex); |
1310 | ||
1311 | disk = info->gd; | |
1312 | if (disk) | |
1313 | bdev = bdget_disk(disk, 0); | |
1314 | ||
1315 | info->xbdev = NULL; | |
1316 | mutex_unlock(&info->mutex); | |
1317 | ||
1318 | if (!bdev) { | |
1319 | kfree(info); | |
1320 | return 0; | |
1321 | } | |
1322 | ||
1323 | /* | |
1324 | * The xbdev was removed before we reached the Closed | |
1325 | * state. See if it's safe to remove the disk. If the bdev | |
1326 | * isn't closed yet, we let release take care of it. | |
1327 | */ | |
1328 | ||
1329 | mutex_lock(&bdev->bd_mutex); | |
1330 | info = disk->private_data; | |
1331 | ||
d54142c7 DS |
1332 | dev_warn(disk_to_dev(disk), |
1333 | "%s was hot-unplugged, %d stale handles\n", | |
1334 | xbdev->nodename, bdev->bd_openers); | |
1335 | ||
7b32d104 | 1336 | if (info && !bdev->bd_openers) { |
fa1bd359 DS |
1337 | xlvbd_release_gendisk(info); |
1338 | disk->private_data = NULL; | |
0e345826 | 1339 | kfree(info); |
fa1bd359 DS |
1340 | } |
1341 | ||
1342 | mutex_unlock(&bdev->bd_mutex); | |
1343 | bdput(bdev); | |
9f27ee59 JF |
1344 | |
1345 | return 0; | |
1346 | } | |
1347 | ||
1d78d705 CL |
1348 | static int blkfront_is_ready(struct xenbus_device *dev) |
1349 | { | |
a1b4b12b | 1350 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
1d78d705 | 1351 | |
5d7ed20e | 1352 | return info->is_ready && info->xbdev; |
1d78d705 CL |
1353 | } |
1354 | ||
a63c848b | 1355 | static int blkif_open(struct block_device *bdev, fmode_t mode) |
9f27ee59 | 1356 | { |
13961743 DS |
1357 | struct gendisk *disk = bdev->bd_disk; |
1358 | struct blkfront_info *info; | |
1359 | int err = 0; | |
6e9624b8 | 1360 | |
2a48fc0a | 1361 | mutex_lock(&blkfront_mutex); |
6e9624b8 | 1362 | |
13961743 DS |
1363 | info = disk->private_data; |
1364 | if (!info) { | |
1365 | /* xbdev gone */ | |
1366 | err = -ERESTARTSYS; | |
1367 | goto out; | |
1368 | } | |
1369 | ||
1370 | mutex_lock(&info->mutex); | |
1371 | ||
1372 | if (!info->gd) | |
1373 | /* xbdev is closed */ | |
1374 | err = -ERESTARTSYS; | |
1375 | ||
1376 | mutex_unlock(&info->mutex); | |
1377 | ||
13961743 | 1378 | out: |
2a48fc0a | 1379 | mutex_unlock(&blkfront_mutex); |
13961743 | 1380 | return err; |
9f27ee59 JF |
1381 | } |
1382 | ||
a63c848b | 1383 | static int blkif_release(struct gendisk *disk, fmode_t mode) |
9f27ee59 | 1384 | { |
a63c848b | 1385 | struct blkfront_info *info = disk->private_data; |
7fd152f4 DS |
1386 | struct block_device *bdev; |
1387 | struct xenbus_device *xbdev; | |
1388 | ||
2a48fc0a | 1389 | mutex_lock(&blkfront_mutex); |
7fd152f4 DS |
1390 | |
1391 | bdev = bdget_disk(disk, 0); |
1392 | |
acfca3c6 DS |
1393 | if (bdev->bd_openers) { |
1394 | bdput(bdev); |
1395 | goto out; |
1396 | } |
1397 | bdput(bdev); |
1396 | ||
7fd152f4 DS |
1397 | /* |
1398 | * Check if we have been instructed to close. We will have | |
1399 | * deferred this request, because the bdev was still open. | |
1400 | */ | |
1401 | ||
1402 | mutex_lock(&info->mutex); | |
1403 | xbdev = info->xbdev; | |
1404 | ||
1405 | if (xbdev && xbdev->state == XenbusStateClosing) { | |
1406 | /* pending switch to state closed */ | |
d54142c7 | 1407 | dev_info(disk_to_dev(disk), "releasing disk\n"); |
7fd152f4 DS |
1408 | xlvbd_release_gendisk(info); |
1409 | xenbus_frontend_closed(info->xbdev); | |
1410 | } | |
1411 | ||
1412 | mutex_unlock(&info->mutex); | |
1413 | ||
1414 | if (!xbdev) { | |
1415 | /* sudden device removal */ | |
d54142c7 | 1416 | dev_info(disk_to_dev(disk), "releasing disk\n"); |
7fd152f4 DS |
1417 | xlvbd_release_gendisk(info); |
1418 | disk->private_data = NULL; | |
1419 | kfree(info); | |
9f27ee59 | 1420 | } |
7fd152f4 | 1421 | |
a4cc14ec | 1422 | out: |
2a48fc0a | 1423 | mutex_unlock(&blkfront_mutex); |
9f27ee59 JF |
1424 | return 0; |
1425 | } | |
1426 | ||
83d5cde4 | 1427 | static const struct block_device_operations xlvbd_block_fops = |
9f27ee59 JF |
1428 | { |
1429 | .owner = THIS_MODULE, | |
a63c848b AV |
1430 | .open = blkif_open, |
1431 | .release = blkif_release, | |
597592d9 | 1432 | .getgeo = blkif_getgeo, |
8a6cfeb6 | 1433 | .ioctl = blkif_ioctl, |
9f27ee59 JF |
1434 | }; |
1435 | ||
1436 | ||
ec9c42ec | 1437 | static const struct xenbus_device_id blkfront_ids[] = { |
9f27ee59 JF |
1438 | { "vbd" }, |
1439 | { "" } | |
1440 | }; | |
1441 | ||
1442 | static struct xenbus_driver blkfront = { | |
1443 | .name = "vbd", | |
1444 | .owner = THIS_MODULE, | |
1445 | .ids = blkfront_ids, | |
1446 | .probe = blkfront_probe, | |
1447 | .remove = blkfront_remove, | |
1448 | .resume = blkfront_resume, | |
203fd61f | 1449 | .otherend_changed = blkback_changed, |
1d78d705 | 1450 | .is_ready = blkfront_is_ready, |
9f27ee59 JF |
1451 | }; |
1452 | ||
1453 | static int __init xlblk_init(void) | |
1454 | { | |
469738e6 LE |
1455 | int ret; |
1456 | ||
6e833587 | 1457 | if (!xen_domain()) |
9f27ee59 JF |
1458 | return -ENODEV; |
1459 | ||
1460 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | |
1461 | printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", | |
1462 | XENVBD_MAJOR, DEV_NAME); | |
1463 | return -ENODEV; | |
1464 | } | |
1465 | ||
469738e6 LE |
1466 | ret = xenbus_register_frontend(&blkfront); |
1467 | if (ret) { | |
1468 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); | |
1469 | return ret; | |
1470 | } | |
1471 | ||
1472 | return 0; | |
9f27ee59 JF |
1473 | } |
1474 | module_init(xlblk_init); | |
1475 | ||
1476 | ||
5a60d0cd | 1477 | static void __exit xlblk_exit(void) |
9f27ee59 JF |
1478 | { |
1479 | return xenbus_unregister_driver(&blkfront); | |
1480 | } | |
1481 | module_exit(xlblk_exit); | |
1482 | ||
1483 | MODULE_DESCRIPTION("Xen virtual block device frontend"); | |
1484 | MODULE_LICENSE("GPL"); | |
1485 | MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR); | |
d2f0c52b | 1486 | MODULE_ALIAS("xen:vbd"); |
4f93f09b | 1487 | MODULE_ALIAS("xenblk"); |