// SPDX-License-Identifier: GPL-2.0-or-later
/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd
*/

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

/* On the XenBus the max length of 'ring-ref%u'. */
#define RINGREF_NAME_LEN (20)

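/* Per-device backend state, stored as the xenbus device's driver data. */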
struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char *,
			    const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

/*
 * The last request could free the device from softirq context and
 * xen_blkif_free() can sleep.
 */
static void xen_blkif_deferred_free(struct work_struct *work)
{
	struct xen_blkif *blkif;

	blkif = container_of(work, struct xen_blkif, free_work);
	xen_blkif_free(blkif);
}

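/*
 * Build the name used for this backend's I/O kthreads,
 * "<frontend-domid>.<device-name>", from the "dev" node written by the
 * hotplug scripts.
 */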
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}

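/*
 * Try to bring the device to Connected: publish the connection details,
 * flush and invalidate the backing device's page cache, then start one
 * xenblkd kthread per ring. Bails out early if the rings or the backing
 * device are not ready, or if we are already connected.
 */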
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];
	struct xen_blkif_ring *ring;
	int i;

	/* Not ready to connect? */
	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	for (i = 0; i < blkif->nr_rings; i++) {
		ring = &blkif->rings[i];
		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i);
		if (IS_ERR(ring->xenblkd)) {
			err = PTR_ERR(ring->xenblkd);
			ring->xenblkd = NULL;
			xenbus_dev_fatal(blkif->be->dev, err,
					 "start %s-%d xenblkd", name, i);
			goto out;
		}
	}
	return;

out:
	while (--i >= 0) {
		ring = &blkif->rings[i];
		kthread_stop(ring->xenblkd);
	}
}

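/*
 * Allocate blkif->nr_rings rings and initialise their locks, lists and
 * wait queues; the rings are freed again in xen_blkif_disconnect().
 */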
static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
	unsigned int r;

	blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
			       GFP_KERNEL);
	if (!blkif->rings)
		return -ENOMEM;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];

		spin_lock_init(&ring->blk_ring_lock);
		init_waitqueue_head(&ring->wq);
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
		spin_lock_init(&ring->free_pages_lock);
		INIT_LIST_HEAD(&ring->free_pages);
		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
		ring->active = true;
	}

	return 0;
}

static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	atomic_set(&blkif->refcnt, 1);
	init_completion(&blkif->drain_complete);
	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);

	return blkif;
}

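/*
 * Map the frontend's shared ring pages and bind its event channel. The
 * ring layout depends on the ABI the frontend negotiated: native, or the
 * 32-bit/64-bit x86 compat variants.
 */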
static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
			 unsigned int nr_grefs, unsigned int evtchn)
{
	int err;
	struct xen_blkif *blkif = ring->blkif;

	/* Already connected through? */
	if (ring->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
				     &ring->blk_ring);
	if (err < 0)
		return err;

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;

		sring = (struct blkif_sring *)ring->blk_ring;
		BACK_RING_INIT(&ring->blk_rings.native, sring,
			       XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;

		sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring;
		BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32,
			       XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;

		sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring;
		BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64,
			       XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", ring);
	if (err < 0) {
		xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
		ring->blk_rings.common.sring = NULL;
		return err;
	}
	ring->irq = err;

	return 0;
}

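/*
 * Tear down the connection: stop the kthreads, unbind the event channels,
 * unmap the shared rings and free the pending request pools. Returns
 * -EBUSY, leaving the rings allocated, while I/O is still in flight.
 */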
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;
	bool busy = false;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

		if (!ring->active)
			continue;

		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			wake_up(&ring->shutdown_wq);
		}

		/* The above kthread_stop() guarantees that at this point we
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
		if (atomic_read(&ring->inflight) > 0) {
			busy = true;
			continue;
		}

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
			ring->irq = 0;
		}

		if (ring->blk_rings.common.sring) {
			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
			ring->blk_rings.common.sring = NULL;
		}

		/* Remove all persistent grants and the cache of ballooned pages. */
		xen_blkbk_free_caches(ring);

		/* Check that there is no request in use */
		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
			list_del(&req->free_list);

			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
				kfree(req->segments[j]);

			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
				kfree(req->indirect_pages[j]);

			kfree(req);
			i++;
		}

		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(!list_empty(&ring->free_pages));
		BUG_ON(ring->free_pages_num != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
	if (busy)
		return -EBUSY;

	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring, so we should free it in
	 * here.
	 */
	kfree(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;

	return 0;
}

static void xen_blkif_free(struct xen_blkif *blkif)
{
	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
	kfree(blkif->be->mode);
	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

/*
 * sysfs interface for VBD I/O requests
 */

#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned int i;						\
		unsigned long long result = 0;				\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

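/* Per-request and per-sector counters, each summed over all rings. */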
VBD_SHOW_ALLRING(oo_req, "%llu\n");
VBD_SHOW_ALLRING(rd_req, "%llu\n");
VBD_SHOW_ALLRING(wr_req, "%llu\n");
VBD_SHOW_ALLRING(f_req, "%llu\n");
VBD_SHOW_ALLRING(ds_req, "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static const struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}

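/*
 * Open the physical device backing this vbd and record its features
 * (CD-ROM, removable, flush, secure erase) for later advertisement.
 */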
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		pr_warn("xen_vbd_create: device %08x could not be opened\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		vbd->flush_support = true;

	if (q && blk_queue_secure_erase(q))
		vbd->discard_secure = true;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
		 handle, blkif->domid);
	return 0;
}

static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	dev_set_drvdata(&dev->dev, NULL);

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);

		/* Put the reference we set in xen_blkif_alloc(). */
		xen_blkif_put(be->blkif);
	}

	return 0;
}

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}

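/*
 * Advertise discard support to the frontend: granularity, alignment and,
 * if the vbd supports it, secure discard.
 */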
static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = be->blkif->vbd.bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
		return;

	if (blk_queue_discard(q)) {
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-granularity", "%u",
				    q->limits.discard_granularity);
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-alignment", "%u",
				    q->limits.discard_alignment);
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}

/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers. Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);

	/* match the pr_debug in xen_blkbk_remove */
	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev,
			 "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	/* Multi-queue: advertise how many queues are supported by us. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenblk_max_queues);
	if (err)
		pr_warn("Error writing multi-queue-max-queues\n");

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
			    xen_blkif_max_ring_order);
	if (err)
		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	xen_blkbk_remove(dev);
	return err;
}

/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node. Read it and the mode node, and create a vbd. If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this. Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Front end dir is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		return;
	}

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}

/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices. connect_ring() has already reported
			 * the error.
			 */
			xen_blkif_disconnect(be->blkif);
			break;
		}
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

/* ** Connection ** */

/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	pr_debug("%s %s\n", __func__, dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it is OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
			    bdev_physical_block_size(be->blkif->vbd.bdev));
	if (err)
		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
				 dev->nodename);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Each ring may span multiple pages, depending on "ring-page-order".
 */
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
{
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	struct pending_req *req, *n;
	int err, i, j;
	struct xen_blkif *blkif = ring->blkif;
	struct xenbus_device *dev = blkif->be->dev;
	unsigned int nr_grefs, evtchn;

	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
		return err;
	}

	nr_grefs = blkif->nr_ring_pages;

	if (unlikely(!nr_grefs)) {
		WARN_ON(true);
		return -EINVAL;
	}

	for (i = 0; i < nr_grefs; i++) {
		char ring_ref_name[RINGREF_NAME_LEN];

		snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
				   "%u", &ring_ref[i]);

		if (err != 1) {
			if (nr_grefs == 1)
				break;

			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/%s",
					 dir, ring_ref_name);
			return err;
		}
	}

	if (err != 1) {
		WARN_ON(nr_grefs != 1);

		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
				   &ring_ref[0]);
		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
			return err;
		}
	}

	err = -ENOMEM;
	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			goto fail;
		list_add_tail(&req->free_list, &ring->pending_free);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
			if (!req->segments[j])
				goto fail;
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
							 GFP_KERNEL);
			if (!req->indirect_pages[j])
				goto fail;
		}
	}

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
		goto fail;
	}

	return 0;

fail:
	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
		list_del(&req->free_list);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			if (!req->segments[j])
				break;
			kfree(req->segments[j]);
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			if (!req->indirect_pages[j])
				break;
			kfree(req->indirect_pages[j]);
		}
		kfree(req);
	}
	return err;
}

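/*
 * Read the frontend's ring geometry (protocol, queue count, ring page
 * order) and map each ring it has granted us.
 */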
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	unsigned int pers_grants;
	char protocol[64] = "";
	int err, i;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
	unsigned int requested_num_queues = 0;
	unsigned int ring_page_order;

	pr_debug("%s %s\n", __func__, dev->otherend);

	blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
			   "%63s", protocol);
	if (err <= 0)
		strcpy(protocol, "unspecified, assuming default");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -ENOSYS;
	}
	pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
					   0);
	blkif->vbd.feature_gnt_persistent = pers_grants;
	blkif->vbd.overflow_max_grants = 0;

	/*
	 * Read the number of hardware queues from frontend.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
						    "multi-queue-num-queues",
						    1);
	if (requested_num_queues > xenblk_max_queues
	    || requested_num_queues == 0) {
		/* Buggy or malicious guest. */
		xenbus_dev_fatal(dev, err,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenblk_max_queues);
		return -ENOSYS;
	}
	blkif->nr_rings = requested_num_queues;
	if (xen_blkif_alloc_rings(blkif))
		return -ENOMEM;

	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
		blkif->nr_rings, blkif->blk_protocol, protocol,
		pers_grants ? "persistent grants" : "");

	ring_page_order = xenbus_read_unsigned(dev->otherend,
					       "ring-page-order", 0);

	if (ring_page_order > xen_blkif_max_ring_order) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err,
				 "requested ring page order %d exceed max:%d",
				 ring_page_order,
				 xen_blkif_max_ring_order);
		return err;
	}

	blkif->nr_ring_pages = 1 << ring_page_order;

	if (blkif->nr_rings == 1)
		return read_per_ring_refs(&blkif->rings[0], dev->otherend);
	else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kmalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
			return -ENOMEM;
		}

		for (i = 0; i < blkif->nr_rings; i++) {
			memset(xspath, 0, xspathsize);
			snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
			err = read_per_ring_refs(&blkif->rings[i], xspath);
			if (err) {
				kfree(xspath);
				return err;
			}
		}
		kfree(xspath);
	}
	return 0;
}

static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver xen_blkbk_driver = {
	.ids = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed
};

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}