/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(0)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
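/*
 * A quick sanity check of that width (illustrative, not part of the
 * original source): 2^8 < 10^2.5, so each byte of an int needs at
 * most 5/2 decimal digits, and the "+ 1" leaves room for a sign.
 * For a 4-byte int this gives (5 * 4) / 2 + 1 = 11 characters, which
 * covers the 10 digits of 2147483647 plus a '-', so "rbd" plus any
 * id and a terminating NUL fits comfortably within DEV_NAME_LEN.
 */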
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These four fields never change for a given rbd image */

	/* The remaining fields need to be updated occasionally */
	struct ceph_snap_context *snapc;
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
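/*
 * For orientation, a minimal sketch of the specification described
 * above; the exact member names and layout here are assumptions for
 * illustration, not the original definition:
 *
 *	struct rbd_spec {
 *		u64		pool_id;
 *		const char	*pool_name;
 *
 *		const char	*image_id;
 *		const char	*image_name;
 *
 *		u64		snap_id;
 *		const char	*snap_name;
 *
 *		struct kref	kref;
 *	};
 */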
/*
 * An instance of the client.  Multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	struct rbd_obj_request	*obj_request;	/* STAT op */

	struct rbd_img_request	*img_request;

	/* links for img_request->obj_requests list */
	struct list_head	links;

	u32			which;		/* position in image request list */

	enum obj_request_type	type;

	struct bio		*bio_list;

	struct page		**copyup_pages;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */

	rbd_obj_callback_t	callback;
	struct completion	completion;
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */

	u64			snap_id;	/* for reads */
	struct ceph_snap_context *snapc;	/* for writes */

	struct request		*rq;		/* block request */
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct page		**copyup_pages;
	spinlock_t		completion_lock;/* protects next_completion */

	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
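/*
 * Illustrative use of the iteration helpers above (the variable names
 * are made up): the "safe" variant walks the list in reverse and
 * tolerates removal of the current entry, so teardown code can do:
 *
 *	struct rbd_obj_request *obj_req;
 *	struct rbd_obj_request *next_obj_req;
 *
 *	for_each_obj_request(img_req, obj_req)
 *		dout("obj %p which %u\n", obj_req, obj_req->which);
 *
 *	for_each_obj_request_safe(img_req, obj_req, next_obj_req)
 *		rbd_img_obj_request_del(img_req, obj_req);
 */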
	struct list_head	node;

	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;

	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* list of snapshots */
	struct list_head	snaps;

	unsigned long		open_count;	/* protected by lock */
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);

static void rbd_dev_release(struct device *dev);
static void rbd_remove_snap_dev(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_probe(struct rbd_device *rbd_dev);
static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
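/*
 * Example (illustrative): with RBD_DEBUG defined, a failed check like
 *
 *	rbd_assert(obj_request->which == BAD_WHICH);
 *
 * prints the enclosing function, the line number, and the expression
 * text before calling BUG(); with RBD_DEBUG undefined the whole
 * statement compiles away to ((void) 0).
 */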
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);

static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
	/* string args above */

	/* Boolean args above */

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

#define RBD_READ_ONLY_DEFAULT	false
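/*
 * Illustrative example: given the option string "ro" (or its long
 * form "read_only"), match_token() below maps it to Opt_read_only
 * via rbd_opts_tokens, and parse_rbd_opts_token() then sets
 * rbd_opts->read_only = true; "rw"/"read_write" set it back to
 * false, matching RBD_READ_ONLY_DEFAULT.
 */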
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	}
	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}
/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}
/*
 * Drop reference to ceph client node.  If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)
			goto out_err;
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		WARN_ON(ondisk->snap_names_len);
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);
	size = sizeof (struct ceph_snap_context);
	size += snap_count * sizeof (header->snapc->snaps[0]);
	header->snapc = kzalloc(size, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;
	atomic_set(&header->snapc->nref, 1);
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	header->snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] =
			le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct rbd_snap *snap;

	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (snap_id == snap->id)
			return snap->name;

	return NULL;
}
static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!strcmp(snap_name, snap->name)) {
			rbd_dev->spec->snap_id = snap->id;
			rbd_dev->mapping.size = snap->size;
			rbd_dev->mapping.features = snap->features;

			return 0;
		}
	}

	return -ENOENT;
}
static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
	int ret;

	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->spec->snap_id = CEPH_NOSNAP;
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
		ret = 0;
	} else {
		ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
		if (ret < 0)
			goto done;
		rbd_dev->mapping.read_only = true;
	}
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);

done:
	return ret;
}
static void rbd_header_free(struct rbd_image_header *header)
{
	kfree(header->object_prefix);
	header->object_prefix = NULL;
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	ceph_put_snap_context(header->snapc);
	header->snapc = NULL;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
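/*
 * Worked example (illustrative, assuming obj_order 22, i.e. 4 MB
 * objects): for an image offset of 0x123456789,
 *
 *	segment number:	0x123456789 >> 22		= 0x48d
 *	segment offset:	0x123456789 & (0x400000 - 1)	= 0x56789
 *
 * and rbd_segment_length() clips the request length so no object
 * request ever crosses into segment 0x48e.
 */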
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
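/*
 * Putting the two tests together (illustrative): the EXISTS bit only
 * means something once KNOWN has been set, so callers check them in
 * order, as the layered-write submit path does further below:
 *
 *	if (obj_request_known_test(obj_request) &&
 *	    obj_request_exists_test(obj_request))
 *		... safe to treat the target object as present ...
 */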
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better offhand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;
	}
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;
	obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
	if (!obj_request)
		return NULL;

	name = (char *)(obj_request + 1);
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request);
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request,
					bool child_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;

	img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		snapc = ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
		if (WARN_ON(!snapc)) {
			kfree(img_request);
			return NULL;	/* Shouldn't happen */
		}
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (child_request)
		img_request_child_set(img_request);
	if (rbd_dev->parent_spec)
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	rbd_img_request_get(img_request);	/* Avoid a warning */
	rbd_img_request_put(img_request);	/* TEMPORARY */

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	if (img_request_child_test(img_request))
		rbd_obj_request_put(img_request->obj_request);

	kfree(img_request);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	bool write_request = img_request_write_test(img_request);
	struct bio *bio_list;
	unsigned int bio_offset = 0;
	struct page **pages;
	u64 img_offset;
	u64 resid;
	u16 opcode;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
	} else {
		rbd_assert(type == OBJ_REQUEST_PAGES);
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		kfree(object_name);	/* object request has its own copy */
		if (!obj_request)
			goto out_unwind;

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_partial;
		} else {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);
		if (!osd_req)
			goto out_partial;
		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;

		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, 0,
					obj_request->bio_list, length);
		else
			osd_req_op_extent_osd_data_pages(osd_req, 0,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

		if (write_request)
			rbd_osd_req_format_write(obj_request);
		else
			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;
		rbd_img_obj_request_add(img_request, obj_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}
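/*
 * For example (illustrative, assuming the default 4 MB object size,
 * obj_order 22): a 10 MB request starting at image offset 3 MB is
 * split by the loop above into four object requests:
 *
 *	object 0:  offset 3 MB, length 1 MB
 *	object 1:  offset 0,    length 4 MB
 *	object 2:  offset 0,    length 4 MB
 *	object 3:  offset 0,    length 1 MB
 */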
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	u64 length;
	u32 page_count;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);
	length = (u64)1 << rbd_dev->header.obj_order;
	page_count = (u32)calc_pages_for(0, length);

	rbd_assert(obj_request->copyup_pages);
	ceph_release_page_vector(obj_request->copyup_pages, page_count);
	obj_request->copyup_pages = NULL;

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	int result;
	u64 obj_size;
	u64 xferred;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
	result = img_request->result;
	obj_size = img_request->length;
	xferred = img_request->xferred;

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);
	rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);

	rbd_img_request_put(img_request);

	if (result)
		goto out_err;

	/* Allocate the new copyup osd request for the original request */

	result = -ENOMEM;
	rbd_assert(!orig_request->osd_req);
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
						false, false);

	/* Then the original write request op */

	osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
					orig_request->offset,
					orig_request->length, 0, 0);
	osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
					orig_request->length);

	rbd_osd_req_format_write(orig_request);

	/* All set, send it off. */

	orig_request->callback = rbd_img_obj_copyup_callback;
	osdc = &rbd_dev->rbd_client->client->osdc;
	result = rbd_obj_request_submit(osdc, orig_request);
	if (!result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}
/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * First things first.  The original osd request is of no
	 * use to us any more, we'll need a new one that can hold
	 * the two ops in a copyup request.  We'll get that later,
	 * but for now we can release the old one.
	 */
	rbd_osd_req_destroy(obj_request->osd_req);
	obj_request->osd_req = NULL;

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length,
						false, true);
	if (!parent_request)
		goto out_err;
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}
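/*
 * To summarize the copyup machinery above (restating the code, not
 * adding to it):
 *
 *  1. rbd_img_obj_parent_read_full() reads the covering byte range
 *     from the parent image into a freshly allocated page array.
 *  2. rbd_img_obj_parent_read_full_callback() then builds a two-op
 *     osd request: a "copyup" class method call carrying those
 *     pages, followed by the original write.
 *  3. rbd_img_obj_copyup_callback() finally releases the copyup
 *     pages and finishes via the normal image object callback.
 */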
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
	rbd_obj_request_put(orig_request);
}
2281 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2283 struct rbd_obj_request *stat_request;
2284 struct rbd_device *rbd_dev;
2285 struct ceph_osd_client *osdc;
2286 struct page **pages = NULL;
2292 * The response data for a STAT call consists of:
2293 *     le64 length;
2294 *     struct {
2295 *         le32 tv_sec;
2296 *         le32 tv_nsec;
2297 *     } mtime;
2299 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2300 page_count = (u32)calc_pages_for(0, size);
2301 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2303 return PTR_ERR(pages);
2306 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2311 rbd_obj_request_get(obj_request);
2312 stat_request->obj_request = obj_request;
2313 stat_request->pages = pages;
2314 stat_request->page_count = page_count;
2316 rbd_assert(obj_request->img_request);
2317 rbd_dev = obj_request->img_request->rbd_dev;
2318 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2320 if (!stat_request->osd_req)
2322 stat_request->callback = rbd_img_obj_exists_callback;
2324 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2325 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2327 rbd_osd_req_format_read(stat_request);
2329 osdc = &rbd_dev->rbd_client->client->osdc;
2330 ret = rbd_obj_request_submit(osdc, stat_request);
2333 rbd_obj_request_put(obj_request);
2338 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2340 struct rbd_img_request *img_request;
2341 struct rbd_device *rbd_dev;
2344 rbd_assert(obj_request_img_data_test(obj_request));
2346 img_request = obj_request->img_request;
2347 rbd_assert(img_request);
2348 rbd_dev = img_request->rbd_dev;
2351 * Only writes to layered images need special handling.
2352 * Reads and non-layered writes are simple object requests.
2353 * Layered writes that start beyond the end of the overlap
2354 * with the parent have no parent data, so they too are
2355 * simple object requests. Finally, if the target object is
2356 * known to already exist, its parent data has already been
2357 * copied, so a write to the object can also be handled as a
2358 * simple object request.
2360 if (!img_request_write_test(img_request) ||
2361 !img_request_layered_test(img_request) ||
2362 rbd_dev->parent_overlap <= obj_request->img_offset ||
2363 ((known = obj_request_known_test(obj_request)) &&
2364 obj_request_exists_test(obj_request))) {
2366 struct rbd_device *rbd_dev;
2367 struct ceph_osd_client *osdc;
2369 rbd_dev = obj_request->img_request->rbd_dev;
2370 osdc = &rbd_dev->rbd_client->client->osdc;
2372 return rbd_obj_request_submit(osdc, obj_request);
2376 * It's a layered write. The target object might exist but
2377 * we may not know that yet. If we know it doesn't exist,
2378 * start by reading the data for the full target object from
2379 * the parent so we can use it for a copyup to the target.
2382 return rbd_img_obj_parent_read_full(obj_request);
2384 /* We don't know whether the target exists. Go find out. */
2386 return rbd_img_obj_exists_submit(obj_request);
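/*
 * Editorial sketch (not part of the driver): the dispatch above can
 * be summarized as a pure predicate on the request state.  The helper
 * name and signature are illustrative assumptions only.
 */
static inline bool rbd_write_may_need_copyup(bool write, bool layered,
					     bool beyond_overlap,
					     bool known, bool exists)
{
	/* Reads, non-layered writes, and writes past the overlap
	 * are submitted as simple object requests. */
	if (!write || !layered || beyond_overlap)
		return false;
	/* If the object is known to exist, its parent data has
	 * already been copied up; otherwise a copyup (or a STAT
	 * to find out whether one is needed) must happen first. */
	return !(known && exists);
}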
2389 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2391 struct rbd_obj_request *obj_request;
2392 struct rbd_obj_request *next_obj_request;
2394 dout("%s: img %p\n", __func__, img_request);
2395 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2398 ret = rbd_img_obj_request_submit(obj_request);
2406 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2408 struct rbd_obj_request *obj_request;
2409 struct rbd_device *rbd_dev;
2412 rbd_assert(img_request_child_test(img_request));
2414 obj_request = img_request->obj_request;
2415 rbd_assert(obj_request);
2416 rbd_assert(obj_request->img_request);
2418 obj_request->result = img_request->result;
2419 if (obj_request->result)
2423 * We need to zero anything beyond the parent overlap
2424 * boundary. Since rbd_img_obj_request_read_callback()
2425 * will zero anything beyond the end of a short read, an
2426 * easy way to do this is to pretend the data from the
2427 * parent came up short--ending at the overlap boundary.
2429 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2430 obj_end = obj_request->img_offset + obj_request->length;
2431 rbd_dev = obj_request->img_request->rbd_dev;
2432 if (obj_end > rbd_dev->parent_overlap) {
2435 if (obj_request->img_offset < rbd_dev->parent_overlap)
2436 xferred = rbd_dev->parent_overlap -
2437 obj_request->img_offset;
2439 obj_request->xferred = min(img_request->xferred, xferred);
2441 obj_request->xferred = img_request->xferred;
2444 rbd_img_obj_request_read_callback(obj_request);
2445 rbd_obj_request_complete(obj_request);
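/*
 * Worked example (editorial, hypothetical numbers): with a parent
 * overlap of 0x100000, a child read covering [0x80000, 0x180000)
 * extends past the overlap.  xferred is capped at 0x100000 - 0x80000
 * = 0x80000, so rbd_img_obj_request_read_callback() zeroes the upper
 * half of the buffer as though the parent read came up short.
 */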
2448 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2450 struct rbd_device *rbd_dev;
2451 struct rbd_img_request *img_request;
2454 rbd_assert(obj_request_img_data_test(obj_request));
2455 rbd_assert(obj_request->img_request != NULL);
2456 rbd_assert(obj_request->result == (s32) -ENOENT);
2457 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2459 rbd_dev = obj_request->img_request->rbd_dev;
2460 rbd_assert(rbd_dev->parent != NULL);
2461 /* rbd_read_finish(obj_request, obj_request->length); */
2462 img_request = rbd_img_request_create(rbd_dev->parent,
2463 obj_request->img_offset,
2464 obj_request->length,
2470 rbd_obj_request_get(obj_request);
2471 img_request->obj_request = obj_request;
2473 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2474 obj_request->bio_list);
2478 img_request->callback = rbd_img_parent_read_callback;
2479 result = rbd_img_request_submit(img_request);
2486 rbd_img_request_put(img_request);
2487 obj_request->result = result;
2488 obj_request->xferred = 0;
2489 obj_request_done_set(obj_request);
2492 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
2493 u64 ver, u64 notify_id)
2495 struct rbd_obj_request *obj_request;
2496 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2499 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2500 OBJ_REQUEST_NODATA);
2505 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2506 if (!obj_request->osd_req)
2508 obj_request->callback = rbd_obj_request_put;
2510 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2512 rbd_osd_req_format_read(obj_request);
2514 ret = rbd_obj_request_submit(osdc, obj_request);
2517 rbd_obj_request_put(obj_request);
2522 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2524 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2531 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2532 rbd_dev->header_name, (unsigned long long) notify_id,
2533 (unsigned int) opcode);
2534 rc = rbd_dev_refresh(rbd_dev, &hver);
2536 rbd_warn(rbd_dev, "got notification but failed to "
2537 "update snaps: %d\n", rc);
2539 rbd_obj_notify_ack(rbd_dev, hver, notify_id);
2543 * Request sync osd watch/unwatch. The value of "start" determines
2544 * whether a watch request is being initiated or torn down.
2546 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2548 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2549 struct rbd_obj_request *obj_request;
2552 rbd_assert(start ^ !!rbd_dev->watch_event);
2553 rbd_assert(start ^ !!rbd_dev->watch_request);
2556 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2557 &rbd_dev->watch_event);
2560 rbd_assert(rbd_dev->watch_event != NULL);
2564 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2565 OBJ_REQUEST_NODATA);
2569 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2570 if (!obj_request->osd_req)
2574 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2576 ceph_osdc_unregister_linger_request(osdc,
2577 rbd_dev->watch_request->osd_req);
2579 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2580 rbd_dev->watch_event->cookie,
2581 rbd_dev->header.obj_version, start);
2582 rbd_osd_req_format_write(obj_request);
2584 ret = rbd_obj_request_submit(osdc, obj_request);
2587 ret = rbd_obj_request_wait(obj_request);
2590 ret = obj_request->result;
2595 * A watch request is set to linger, so the underlying osd
2596 * request won't go away until we unregister it. We retain
2597 * a pointer to the object request during that time (in
2598 * rbd_dev->watch_request), so we'll keep a reference to
2599 * it. We'll drop that reference (below) after we've
2600 * unregistered it.
2603 rbd_dev->watch_request = obj_request;
2608 /* We have successfully torn down the watch request */
2610 rbd_obj_request_put(rbd_dev->watch_request);
2611 rbd_dev->watch_request = NULL;
2613 /* Cancel the event if we're tearing down, or on error */
2614 ceph_osdc_cancel_event(rbd_dev->watch_event);
2615 rbd_dev->watch_event = NULL;
2617 rbd_obj_request_put(obj_request);
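/*
 * Usage note (editorial): callers pass a nonzero "start" to register
 * the lingering watch and zero to tear it down again, e.g.
 * rbd_dev_header_watch_sync(rbd_dev, 1) during probe and
 * rbd_dev_header_watch_sync(rbd_dev, 0) when the device goes away.
 */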
2623 * Synchronous osd object method call
2625 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2626 const char *object_name,
2627 const char *class_name,
2628 const char *method_name,
2629 const void *outbound,
2630 size_t outbound_size,
2632 size_t inbound_size,
2635 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2636 struct rbd_obj_request *obj_request;
2637 struct page **pages;
2642 * Method calls are ultimately read operations. The result
2643 * should be placed into the inbound buffer provided. They
2644 * also supply outbound data--parameters for the object
2645 * method. Currently if this is present it will be a
2646 * snapshot id.
2648 page_count = (u32)calc_pages_for(0, inbound_size);
2649 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2651 return PTR_ERR(pages);
2654 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2659 obj_request->pages = pages;
2660 obj_request->page_count = page_count;
2662 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2663 if (!obj_request->osd_req)
2666 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2667 class_name, method_name);
2668 if (outbound_size) {
2669 struct ceph_pagelist *pagelist;
2671 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2675 ceph_pagelist_init(pagelist);
2676 ceph_pagelist_append(pagelist, outbound, outbound_size);
2677 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2680 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2681 obj_request->pages, inbound_size,
2683 rbd_osd_req_format_read(obj_request);
2685 ret = rbd_obj_request_submit(osdc, obj_request);
2688 ret = rbd_obj_request_wait(obj_request);
2692 ret = obj_request->result;
2696 rbd_assert(obj_request->xferred < (u64)INT_MAX);
2697 ret = (int)obj_request->xferred;
2698 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
2700 *version = obj_request->version;
2703 rbd_obj_request_put(obj_request);
2705 ceph_release_page_vector(pages, page_count);
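/*
 * Example (editorial): a typical caller, as seen later in this file,
 * fetches the object prefix of a format 2 image:
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				"rbd", "get_object_prefix", NULL, 0,
 *				reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
 */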
2710 static void rbd_request_fn(struct request_queue *q)
2711 __releases(q->queue_lock) __acquires(q->queue_lock)
2713 struct rbd_device *rbd_dev = q->queuedata;
2714 bool read_only = rbd_dev->mapping.read_only;
2718 while ((rq = blk_fetch_request(q))) {
2719 bool write_request = rq_data_dir(rq) == WRITE;
2720 struct rbd_img_request *img_request;
2724 /* Ignore any non-FS requests that filter through. */
2726 if (rq->cmd_type != REQ_TYPE_FS) {
2727 dout("%s: non-fs request type %d\n", __func__,
2728 (int) rq->cmd_type);
2729 __blk_end_request_all(rq, 0);
2733 /* Ignore/skip any zero-length requests */
2735 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2736 length = (u64) blk_rq_bytes(rq);
2739 dout("%s: zero-length request\n", __func__);
2740 __blk_end_request_all(rq, 0);
2744 spin_unlock_irq(q->queue_lock);
2746 /* Disallow writes to a read-only device */
2748 if (write_request) {
2752 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
2756 * Quit early if the mapped snapshot no longer
2757 * exists. It's still possible the snapshot will
2758 * have disappeared by the time our request arrives
2759 * at the osd, but there's no sense in sending it if
2760 * we already know.
2762 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
2763 dout("request for non-existent snapshot");
2764 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2770 if (WARN_ON(offset && length > U64_MAX - offset + 1))
2771 goto end_request; /* Shouldn't happen */
2774 img_request = rbd_img_request_create(rbd_dev, offset, length,
2775 write_request, false);
2779 img_request->rq = rq;
2781 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2784 result = rbd_img_request_submit(img_request);
2786 rbd_img_request_put(img_request);
2788 spin_lock_irq(q->queue_lock);
2790 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
2791 write_request ? "write" : "read",
2792 length, offset, result);
2794 __blk_end_request_all(rq, result);
2800 * A bio merge callback for the request queue: makes sure we don't
2801 * create a bio that spans multiple osd objects. One exception would be
2802 * single-page bios, which we handle later at bio_chain_clone_range()
2804 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2805 struct bio_vec *bvec)
2807 struct rbd_device *rbd_dev = q->queuedata;
2808 sector_t sector_offset;
2809 sector_t sectors_per_obj;
2810 sector_t obj_sector_offset;
2814 * Find how far into its enclosing rbd object the
2815 * partition-relative bio start sector falls.
2818 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2819 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2820 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2823 * Compute the number of bytes from that offset to the end
2824 * of the object. Account for what's already used by the bio.
2826 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2827 if (ret > bmd->bi_size)
2828 ret -= bmd->bi_size;
2829 else
2830 ret = 0;
2833 * Don't send back more than was asked for. And if the bio
2834 * was empty, let the whole thing through because: "Note
2835 * that a block device *must* allow a single page to be
2836 * added to an empty bio."
2838 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2839 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2840 ret = (int) bvec->bv_len;
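/*
 * Worked example (editorial, hypothetical numbers): with obj_order
 * 22, sectors_per_obj = 1 << (22 - 9) = 8192.  A bio starting at
 * device sector 12288 has obj_sector_offset 4096, leaving
 * 4096 << 9 = 2 MiB to the object boundary; after subtracting
 * bi_size, that remainder caps how much of bvec may be merged.
 */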
2845 static void rbd_free_disk(struct rbd_device *rbd_dev)
2847 struct gendisk *disk = rbd_dev->disk;
2852 if (disk->flags & GENHD_FL_UP)
2855 blk_cleanup_queue(disk->queue);
2859 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2860 const char *object_name,
2861 u64 offset, u64 length,
2862 void *buf, u64 *version)
2865 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2866 struct rbd_obj_request *obj_request;
2867 struct page **pages = NULL;
2872 page_count = (u32) calc_pages_for(offset, length);
2873 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2875 ret = PTR_ERR(pages);
2878 obj_request = rbd_obj_request_create(object_name, offset, length,
2883 obj_request->pages = pages;
2884 obj_request->page_count = page_count;
2886 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2887 if (!obj_request->osd_req)
2890 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
2891 offset, length, 0, 0);
2892 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
2894 obj_request->length,
2895 obj_request->offset & ~PAGE_MASK,
2897 rbd_osd_req_format_read(obj_request);
2899 ret = rbd_obj_request_submit(osdc, obj_request);
2902 ret = rbd_obj_request_wait(obj_request);
2906 ret = obj_request->result;
2910 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
2911 size = (size_t) obj_request->xferred;
2912 ceph_copy_from_page_vector(pages, buf, 0, size);
2913 rbd_assert(size <= (size_t) INT_MAX);
2916 *version = obj_request->version;
2919 rbd_obj_request_put(obj_request);
2921 ceph_release_page_vector(pages, page_count);
2927 * Read the complete header for the given rbd device.
2929 * Returns a pointer to a dynamically-allocated buffer containing
2930 * the complete and validated header. Caller can pass the address
2931 * of a variable that will be filled in with the version of the
2932 * header object at the time it was read.
2934 * Returns a pointer-coded errno if a failure occurs.
2936 static struct rbd_image_header_ondisk *
2937 rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
2939 struct rbd_image_header_ondisk *ondisk = NULL;
2946 * The complete header will include an array of its 64-bit
2947 * snapshot ids, followed by the names of those snapshots as
2948 * a contiguous block of NUL-terminated strings. Note that
2949 * the number of snapshots could change by the time we read
2950 * it in, in which case we re-read it.
2957 size = sizeof (*ondisk);
2958 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2960 ondisk = kmalloc(size, GFP_KERNEL);
2962 return ERR_PTR(-ENOMEM);
2964 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
2965 0, size, ondisk, version);
2968 if (WARN_ON((size_t) ret < size)) {
2970 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
2974 if (!rbd_dev_ondisk_valid(ondisk)) {
2976 rbd_warn(rbd_dev, "invalid header");
2980 names_size = le64_to_cpu(ondisk->snap_names_len);
2981 want_count = snap_count;
2982 snap_count = le32_to_cpu(ondisk->snap_count);
2983 } while (snap_count != want_count);
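/*
 * Editorial note: the loop above re-reads the header whenever the
 * snapshot count reported in the reply differs from the count the
 * buffer was sized for, so a snapshot created or deleted mid-read
 * simply triggers another pass with a correctly sized buffer.
 */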
2990 return ERR_PTR(ret);
2994 * reload the ondisk header
2996 static int rbd_read_header(struct rbd_device *rbd_dev,
2997 struct rbd_image_header *header)
2999 struct rbd_image_header_ondisk *ondisk;
3003 ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
3005 return PTR_ERR(ondisk);
3006 ret = rbd_header_from_disk(header, ondisk);
3008 header->obj_version = ver;
3014 static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
3016 struct rbd_snap *snap;
3017 struct rbd_snap *next;
3019 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
3020 rbd_remove_snap_dev(snap);
3023 static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
3027 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
3030 size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
3031 dout("setting size to %llu sectors", (unsigned long long) size);
3032 rbd_dev->mapping.size = (u64) size;
3033 set_capacity(rbd_dev->disk, size);
3037 * only read the first part of the ondisk header, without the snaps info
3039 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
3042 struct rbd_image_header h;
3044 ret = rbd_read_header(rbd_dev, &h);
3048 down_write(&rbd_dev->header_rwsem);
3050 /* Update image size, and check for resize of mapped image */
3051 rbd_dev->header.image_size = h.image_size;
3052 rbd_update_mapping_size(rbd_dev);
3054 /* rbd_dev->header.object_prefix shouldn't change */
3055 kfree(rbd_dev->header.snap_sizes);
3056 kfree(rbd_dev->header.snap_names);
3057 /* osd requests may still refer to snapc */
3058 ceph_put_snap_context(rbd_dev->header.snapc);
3061 *hver = h.obj_version;
3062 rbd_dev->header.obj_version = h.obj_version;
3063 rbd_dev->header.image_size = h.image_size;
3064 rbd_dev->header.snapc = h.snapc;
3065 rbd_dev->header.snap_names = h.snap_names;
3066 rbd_dev->header.snap_sizes = h.snap_sizes;
3067 /* Free the extra copy of the object prefix */
3068 WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
3069 kfree(h.object_prefix);
3071 ret = rbd_dev_snaps_update(rbd_dev);
3073 ret = rbd_dev_snaps_register(rbd_dev);
3075 up_write(&rbd_dev->header_rwsem);
3080 static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
3084 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3085 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3086 if (rbd_dev->image_format == 1)
3087 ret = rbd_dev_v1_refresh(rbd_dev, hver);
3089 ret = rbd_dev_v2_refresh(rbd_dev, hver);
3090 mutex_unlock(&ctl_mutex);
3091 revalidate_disk(rbd_dev->disk);
3096 static int rbd_init_disk(struct rbd_device *rbd_dev)
3098 struct gendisk *disk;
3099 struct request_queue *q;
3102 /* create gendisk info */
3103 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3107 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3109 disk->major = rbd_dev->major;
3110 disk->first_minor = 0;
3111 disk->fops = &rbd_bd_ops;
3112 disk->private_data = rbd_dev;
3114 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3118 /* We use the default size, but let's be explicit about it. */
3119 blk_queue_physical_block_size(q, SECTOR_SIZE);
3121 /* set io sizes to object size */
3122 segment_size = rbd_obj_bytes(&rbd_dev->header);
3123 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3124 blk_queue_max_segment_size(q, segment_size);
3125 blk_queue_io_min(q, segment_size);
3126 blk_queue_io_opt(q, segment_size);
3128 blk_queue_merge_bvec(q, rbd_merge_bvec);
3131 q->queuedata = rbd_dev;
3133 rbd_dev->disk = disk;
3135 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
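/*
 * Worked example (editorial, assuming the common object order of
 * 22): segment_size = 4 MiB, so max_hw_sectors is 4 MiB / 512 =
 * 8192 sectors, and io_min/io_opt both report 4 MiB, matching the
 * size of the backing rados objects.
 */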
3148 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3150 return container_of(dev, struct rbd_device, dev);
3153 static ssize_t rbd_size_show(struct device *dev,
3154 struct device_attribute *attr, char *buf)
3156 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3159 down_read(&rbd_dev->header_rwsem);
3160 size = get_capacity(rbd_dev->disk);
3161 up_read(&rbd_dev->header_rwsem);
3163 return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
3167 * Note this shows the features for whatever's mapped, which is not
3168 * necessarily the base image.
3170 static ssize_t rbd_features_show(struct device *dev,
3171 struct device_attribute *attr, char *buf)
3173 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3175 return sprintf(buf, "0x%016llx\n",
3176 (unsigned long long) rbd_dev->mapping.features);
3179 static ssize_t rbd_major_show(struct device *dev,
3180 struct device_attribute *attr, char *buf)
3182 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3184 return sprintf(buf, "%d\n", rbd_dev->major);
3187 static ssize_t rbd_client_id_show(struct device *dev,
3188 struct device_attribute *attr, char *buf)
3190 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3192 return sprintf(buf, "client%lld\n",
3193 ceph_client_id(rbd_dev->rbd_client->client));
3196 static ssize_t rbd_pool_show(struct device *dev,
3197 struct device_attribute *attr, char *buf)
3199 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3201 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3204 static ssize_t rbd_pool_id_show(struct device *dev,
3205 struct device_attribute *attr, char *buf)
3207 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3209 return sprintf(buf, "%llu\n",
3210 (unsigned long long) rbd_dev->spec->pool_id);
3213 static ssize_t rbd_name_show(struct device *dev,
3214 struct device_attribute *attr, char *buf)
3216 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3218 if (rbd_dev->spec->image_name)
3219 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3221 return sprintf(buf, "(unknown)\n");
3224 static ssize_t rbd_image_id_show(struct device *dev,
3225 struct device_attribute *attr, char *buf)
3227 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3229 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3233 * Shows the name of the currently-mapped snapshot (or
3234 * RBD_SNAP_HEAD_NAME for the base image).
3236 static ssize_t rbd_snap_show(struct device *dev,
3237 struct device_attribute *attr,
3240 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3242 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3246 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3247 * for the parent image. If there is no parent, simply shows
3248 * "(no parent image)".
3250 static ssize_t rbd_parent_show(struct device *dev,
3251 struct device_attribute *attr,
3254 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3255 struct rbd_spec *spec = rbd_dev->parent_spec;
3260 return sprintf(buf, "(no parent image)\n");
3262 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3263 (unsigned long long) spec->pool_id, spec->pool_name);
3268 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3269 spec->image_name ? spec->image_name : "(unknown)");
3274 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3275 (unsigned long long) spec->snap_id, spec->snap_name);
3280 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3285 return (ssize_t) (bufp - buf);
3288 static ssize_t rbd_image_refresh(struct device *dev,
3289 struct device_attribute *attr,
3293 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3296 ret = rbd_dev_refresh(rbd_dev, NULL);
3298 return ret < 0 ? ret : size;
3301 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3302 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3303 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3304 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3305 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3306 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3307 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3308 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3309 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3310 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3311 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3313 static struct attribute *rbd_attrs[] = {
3314 &dev_attr_size.attr,
3315 &dev_attr_features.attr,
3316 &dev_attr_major.attr,
3317 &dev_attr_client_id.attr,
3318 &dev_attr_pool.attr,
3319 &dev_attr_pool_id.attr,
3320 &dev_attr_name.attr,
3321 &dev_attr_image_id.attr,
3322 &dev_attr_current_snap.attr,
3323 &dev_attr_parent.attr,
3324 &dev_attr_refresh.attr,
3328 static struct attribute_group rbd_attr_group = {
3332 static const struct attribute_group *rbd_attr_groups[] = {
3337 static void rbd_sysfs_dev_release(struct device *dev)
3341 static struct device_type rbd_device_type = {
3343 .groups = rbd_attr_groups,
3344 .release = rbd_sysfs_dev_release,
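/*
 * Usage note (editorial): with these attributes registered, a mapped
 * image appears under /sys/bus/rbd/devices/<id>/, so for example
 * "cat /sys/bus/rbd/devices/0/size" reports the image size in bytes,
 * and writing to the refresh attribute forces a header re-read.
 */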
3352 static ssize_t rbd_snap_size_show(struct device *dev,
3353 struct device_attribute *attr,
3356 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
3358 return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
3361 static ssize_t rbd_snap_id_show(struct device *dev,
3362 struct device_attribute *attr,
3365 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
3367 return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
3370 static ssize_t rbd_snap_features_show(struct device *dev,
3371 struct device_attribute *attr,
3374 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
3376 return sprintf(buf, "0x%016llx\n",
3377 (unsigned long long) snap->features);
3380 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
3381 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
3382 static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
3384 static struct attribute *rbd_snap_attrs[] = {
3385 &dev_attr_snap_size.attr,
3386 &dev_attr_snap_id.attr,
3387 &dev_attr_snap_features.attr,
3391 static struct attribute_group rbd_snap_attr_group = {
3392 .attrs = rbd_snap_attrs,
3395 static void rbd_snap_dev_release(struct device *dev)
3397 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
3402 static const struct attribute_group *rbd_snap_attr_groups[] = {
3403 &rbd_snap_attr_group,
3407 static struct device_type rbd_snap_device_type = {
3408 .groups = rbd_snap_attr_groups,
3409 .release = rbd_snap_dev_release,
3412 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3414 kref_get(&spec->kref);
3419 static void rbd_spec_free(struct kref *kref);
3420 static void rbd_spec_put(struct rbd_spec *spec)
3423 kref_put(&spec->kref, rbd_spec_free);
3426 static struct rbd_spec *rbd_spec_alloc(void)
3428 struct rbd_spec *spec;
3430 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3433 kref_init(&spec->kref);
3438 static void rbd_spec_free(struct kref *kref)
3440 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3442 kfree(spec->pool_name);
3443 kfree(spec->image_id);
3444 kfree(spec->image_name);
3445 kfree(spec->snap_name);
3449 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3450 struct rbd_spec *spec)
3452 struct rbd_device *rbd_dev;
3454 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3458 spin_lock_init(&rbd_dev->lock);
3460 INIT_LIST_HEAD(&rbd_dev->node);
3461 INIT_LIST_HEAD(&rbd_dev->snaps);
3462 init_rwsem(&rbd_dev->header_rwsem);
3464 rbd_dev->spec = spec;
3465 rbd_dev->rbd_client = rbdc;
3467 /* Initialize the layout used for all rbd requests */
3469 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3470 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3471 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3472 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3477 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3479 rbd_spec_put(rbd_dev->parent_spec);
3480 kfree(rbd_dev->header_name);
3481 rbd_put_client(rbd_dev->rbd_client);
3482 rbd_spec_put(rbd_dev->spec);
3486 static bool rbd_snap_registered(struct rbd_snap *snap)
3488 bool ret = snap->dev.type == &rbd_snap_device_type;
3489 bool reg = device_is_registered(&snap->dev);
3491 rbd_assert(!ret ^ reg);
3496 static void rbd_remove_snap_dev(struct rbd_snap *snap)
3498 list_del(&snap->node);
3499 if (device_is_registered(&snap->dev))
3500 device_unregister(&snap->dev);
3503 static int rbd_register_snap_dev(struct rbd_snap *snap,
3504 struct device *parent)
3506 struct device *dev = &snap->dev;
3509 dev->type = &rbd_snap_device_type;
3510 dev->parent = parent;
3511 dev->release = rbd_snap_dev_release;
3512 dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
3513 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
3515 ret = device_register(dev);
3520 static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
3521 const char *snap_name,
3522 u64 snap_id, u64 snap_size,
3525 struct rbd_snap *snap;
3528 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
3530 return ERR_PTR(-ENOMEM);
3533 snap->name = kstrdup(snap_name, GFP_KERNEL);
3538 snap->size = snap_size;
3539 snap->features = snap_features;
3547 return ERR_PTR(ret);
3550 static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
3551 u64 *snap_size, u64 *snap_features)
3555 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
3557 *snap_size = rbd_dev->header.snap_sizes[which];
3558 *snap_features = 0; /* No features for v1 */
3560 /* Skip over names until we find the one we are looking for */
3562 snap_name = rbd_dev->header.snap_names;
3564 snap_name += strlen(snap_name) + 1;
3570 * Get the size and object order for an image snapshot or, if
3571 * snap_id is CEPH_NOSNAP, get this information for the base
3572 * image.
3574 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3575 u8 *order, u64 *snap_size)
3577 __le64 snapid = cpu_to_le64(snap_id);
3582 } __attribute__ ((packed)) size_buf = { 0 };
3584 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3586 &snapid, sizeof (snapid),
3587 &size_buf, sizeof (size_buf), NULL);
3588 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3591 if (ret < sizeof (size_buf))
3594 *order = size_buf.order;
3595 *snap_size = le64_to_cpu(size_buf.size);
3597 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3598 (unsigned long long)snap_id, (unsigned int)*order,
3599 (unsigned long long)*snap_size);
3604 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3606 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3607 &rbd_dev->header.obj_order,
3608 &rbd_dev->header.image_size);
3611 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3617 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3621 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3622 "rbd", "get_object_prefix", NULL, 0,
3623 reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
3624 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3629 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3630 p + ret, NULL, GFP_NOIO);
3633 if (IS_ERR(rbd_dev->header.object_prefix)) {
3634 ret = PTR_ERR(rbd_dev->header.object_prefix);
3635 rbd_dev->header.object_prefix = NULL;
3637 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3645 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3648 __le64 snapid = cpu_to_le64(snap_id);
3652 } __attribute__ ((packed)) features_buf = { 0 };
3656 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3657 "rbd", "get_features",
3658 &snapid, sizeof (snapid),
3659 &features_buf, sizeof (features_buf), NULL);
3660 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3663 if (ret < sizeof (features_buf))
3666 incompat = le64_to_cpu(features_buf.incompat);
3667 if (incompat & ~RBD_FEATURES_SUPPORTED)
3670 *snap_features = le64_to_cpu(features_buf.features);
3672 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3673 (unsigned long long)snap_id,
3674 (unsigned long long)*snap_features,
3675 (unsigned long long)le64_to_cpu(features_buf.incompat));
3680 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3682 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3683 &rbd_dev->header.features);
3686 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3688 struct rbd_spec *parent_spec;
3690 void *reply_buf = NULL;
3698 parent_spec = rbd_spec_alloc();
3702 size = sizeof (__le64) + /* pool_id */
3703 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3704 sizeof (__le64) + /* snap_id */
3705 sizeof (__le64); /* overlap */
3706 reply_buf = kmalloc(size, GFP_KERNEL);
3712 snapid = cpu_to_le64(CEPH_NOSNAP);
3713 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3714 "rbd", "get_parent",
3715 &snapid, sizeof (snapid),
3716 reply_buf, size, NULL);
3717 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3722 end = reply_buf + ret;
3724 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
3725 if (parent_spec->pool_id == CEPH_NOPOOL)
3726 goto out; /* No parent? No problem. */
3728 /* The ceph file layout needs to fit pool id in 32 bits */
3731 if (WARN_ON(parent_spec->pool_id > (u64)U32_MAX))
3734 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3735 if (IS_ERR(image_id)) {
3736 ret = PTR_ERR(image_id);
3739 parent_spec->image_id = image_id;
3740 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3741 ceph_decode_64_safe(&p, end, overlap, out_err);
3743 rbd_dev->parent_overlap = overlap;
3744 rbd_dev->parent_spec = parent_spec;
3745 parent_spec = NULL; /* rbd_dev now owns this */
3750 rbd_spec_put(parent_spec);
3755 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3759 __le64 stripe_count;
3760 } __attribute__ ((packed)) striping_info_buf = { 0 };
3761 size_t size = sizeof (striping_info_buf);
3768 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3769 "rbd", "get_stripe_unit_count", NULL, 0,
3770 (char *)&striping_info_buf, size, NULL);
3771 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3778 * We don't actually support the "fancy striping" feature
3779 * (STRIPINGV2) yet, but if the striping sizes are the
3780 * defaults the behavior is the same as before. So find
3781 * out, and only fail if the image has non-default values.
3784 obj_size = (u64)1 << rbd_dev->header.obj_order;
3785 p = &striping_info_buf;
3786 stripe_unit = ceph_decode_64(&p);
3787 if (stripe_unit != obj_size) {
3788 rbd_warn(rbd_dev, "unsupported stripe unit "
3789 "(got %llu want %llu)",
3790 stripe_unit, obj_size);
3793 stripe_count = ceph_decode_64(&p);
3794 if (stripe_count != 1) {
3795 rbd_warn(rbd_dev, "unsupported stripe count "
3796 "(got %llu want 1)", stripe_count);
3799 rbd_dev->stripe_unit = stripe_unit;
3800 rbd_dev->stripe_count = stripe_count;
3805 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3807 size_t image_id_size;
3812 void *reply_buf = NULL;
3814 char *image_name = NULL;
3817 rbd_assert(!rbd_dev->spec->image_name);
3819 len = strlen(rbd_dev->spec->image_id);
3820 image_id_size = sizeof (__le32) + len;
3821 image_id = kmalloc(image_id_size, GFP_KERNEL);
3826 end = image_id + image_id_size;
3827 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3829 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3830 reply_buf = kmalloc(size, GFP_KERNEL);
3834 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3835 "rbd", "dir_get_name",
3836 image_id, image_id_size,
3837 reply_buf, size, NULL);
3841 end = reply_buf + size;
3842 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3843 if (IS_ERR(image_name))
3846 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3855 * When a parent image gets probed, we only have the pool, image,
3856 * and snapshot ids but not the names of any of them. This call
3857 * is made later to fill in those names. It has to be done after
3858 * rbd_dev_snaps_update() has completed because some of the
3859 * information (in particular, snapshot name) is not available
3860 * until then.
3862 static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
3864 struct ceph_osd_client *osdc;
3866 void *reply_buf = NULL;
3869 if (rbd_dev->spec->pool_name)
3870 return 0; /* Already have the names */
3872 /* Look up the pool name */
3874 osdc = &rbd_dev->rbd_client->client->osdc;
3875 name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
3877 rbd_warn(rbd_dev, "there is no pool with id %llu",
3878 rbd_dev->spec->pool_id); /* Really a BUG() */
3882 rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
3883 if (!rbd_dev->spec->pool_name)
3886 /* Fetch the image name; tolerate failure here */
3888 name = rbd_dev_image_name(rbd_dev);
3890 rbd_dev->spec->image_name = (char *)name;
3892 rbd_warn(rbd_dev, "unable to get image name");
3894 /* Look up the snapshot name. */
3896 name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
3898 rbd_warn(rbd_dev, "no snapshot with id %llu",
3899 rbd_dev->spec->snap_id); /* Really a BUG() */
3903 rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
3904 if (!rbd_dev->spec->snap_name)
3910 kfree(rbd_dev->spec->pool_name);
3911 rbd_dev->spec->pool_name = NULL;
3916 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
3925 struct ceph_snap_context *snapc;
3929 * We'll need room for the seq value (maximum snapshot id),
3930 * snapshot count, and array of that many snapshot ids.
3931 * For now we have a fixed upper limit on the number we're
3932 * prepared to receive.
3934 size = sizeof (__le64) + sizeof (__le32) +
3935 RBD_MAX_SNAP_COUNT * sizeof (__le64);
3936 reply_buf = kzalloc(size, GFP_KERNEL);
3940 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3941 "rbd", "get_snapcontext", NULL, 0,
3942 reply_buf, size, ver);
3943 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3948 end = reply_buf + ret;
3950 ceph_decode_64_safe(&p, end, seq, out);
3951 ceph_decode_32_safe(&p, end, snap_count, out);
3954 * Make sure the reported number of snapshot ids wouldn't go
3955 * beyond the end of our buffer. But before checking that,
3956 * make sure the computed size of the snapshot context we
3957 * allocate is representable in a size_t.
3959 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3964 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3967 size = sizeof (struct ceph_snap_context) +
3968 snap_count * sizeof (snapc->snaps[0]);
3969 snapc = kmalloc(size, GFP_KERNEL);
3976 atomic_set(&snapc->nref, 1);
3978 snapc->num_snaps = snap_count;
3979 for (i = 0; i < snap_count; i++)
3980 snapc->snaps[i] = ceph_decode_64(&p);
3982 rbd_dev->header.snapc = snapc;
3984 dout(" snap context seq = %llu, snap_count = %u\n",
3985 (unsigned long long)seq, (unsigned int)snap_count);
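/*
 * Size check (editorial): with RBD_MAX_SNAP_COUNT (510) snapshots,
 * the reply buffer allocated above is sizeof(__le64) +
 * sizeof(__le32) + 510 * sizeof(__le64) = 8 + 4 + 4080 = 4092 bytes,
 * so the largest permitted snapshot context fits in one 4 KiB page.
 */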
3992 static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
4002 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4003 reply_buf = kmalloc(size, GFP_KERNEL);
4005 return ERR_PTR(-ENOMEM);
4007 snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
4008 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4009 "rbd", "get_snapshot_name",
4010 &snap_id, sizeof (snap_id),
4011 reply_buf, size, NULL);
4012 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4017 end = reply_buf + size;
4018 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4019 if (IS_ERR(snap_name)) {
4020 ret = PTR_ERR(snap_name);
4023 dout(" snap_id 0x%016llx snap_name = %s\n",
4024 (unsigned long long)le64_to_cpu(snap_id), snap_name);
4032 return ERR_PTR(ret);
4035 static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
4036 u64 *snap_size, u64 *snap_features)
4042 snap_id = rbd_dev->header.snapc->snaps[which];
4043 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
4045 return ERR_PTR(ret);
4046 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
4048 return ERR_PTR(ret);
4050 return rbd_dev_v2_snap_name(rbd_dev, which);
4053 static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
4054 u64 *snap_size, u64 *snap_features)
4056 if (rbd_dev->image_format == 1)
4057 return rbd_dev_v1_snap_info(rbd_dev, which,
4058 snap_size, snap_features);
4059 if (rbd_dev->image_format == 2)
4060 return rbd_dev_v2_snap_info(rbd_dev, which,
4061 snap_size, snap_features);
4062 return ERR_PTR(-EINVAL);
4065 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
4070 down_write(&rbd_dev->header_rwsem);
4072 /* Grab old order first, to see if it changes */
4074 obj_order = rbd_dev->header.obj_order;
4075 ret = rbd_dev_v2_image_size(rbd_dev);
4078 if (rbd_dev->header.obj_order != obj_order) {
4082 rbd_update_mapping_size(rbd_dev);
4084 ret = rbd_dev_v2_snap_context(rbd_dev, hver);
4085 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4088 ret = rbd_dev_snaps_update(rbd_dev);
4089 dout("rbd_dev_snaps_update returned %d\n", ret);
4092 ret = rbd_dev_snaps_register(rbd_dev);
4093 dout("rbd_dev_snaps_register returned %d\n", ret);
4095 up_write(&rbd_dev->header_rwsem);
4101 * Scan the rbd device's current snapshot list and compare it to the
4102 * newly-received snapshot context. Remove any existing snapshots
4103 * not present in the new snapshot context. Add a new snapshot for
4104 * any snapshots in the snapshot context not in the current list.
4105 * And verify there are no changes to snapshots we already know
4106 * about.
4108 * Assumes the snapshots in the snapshot context are sorted by
4109 * snapshot id, highest id first. (Snapshots in the rbd_dev's list
4110 * are also maintained in that order.)
4112 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
4114 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4115 const u32 snap_count = snapc->num_snaps;
4116 struct list_head *head = &rbd_dev->snaps;
4117 struct list_head *links = head->next;
4120 dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
4121 while (index < snap_count || links != head) {
4123 struct rbd_snap *snap;
4126 u64 snap_features = 0;
4128 snap_id = index < snap_count ? snapc->snaps[index]
4130 snap = links != head ? list_entry(links, struct rbd_snap, node)
4132 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
4134 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
4135 struct list_head *next = links->next;
4138 * A previously-existing snapshot is not in
4139 * the new snap context.
4141 * If the now missing snapshot is the one the
4142 * image is mapped to, clear its exists flag
4143 * so we can avoid sending any more requests
4144 * to it.
4146 if (rbd_dev->spec->snap_id == snap->id)
4147 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4148 rbd_remove_snap_dev(snap);
4149 dout("%ssnap id %llu has been removed\n",
4150 rbd_dev->spec->snap_id == snap->id ?
4152 (unsigned long long) snap->id);
4154 /* Done with this list entry; advance */
4160 snap_name = rbd_dev_snap_info(rbd_dev, index,
4161 &snap_size, &snap_features);
4162 if (IS_ERR(snap_name))
4163 return PTR_ERR(snap_name);
4165 dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
4166 (unsigned long long) snap_id);
4167 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
4168 struct rbd_snap *new_snap;
4170 /* We haven't seen this snapshot before */
4172 new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
4173 snap_id, snap_size, snap_features);
4174 if (IS_ERR(new_snap)) {
4175 int err = PTR_ERR(new_snap);
4177 dout(" failed to add dev, error %d\n", err);
4182 /* New goes before existing, or at end of list */
4184 dout(" added dev%s\n", snap ? "" : " at end\n");
4186 list_add_tail(&new_snap->node, &snap->node);
4188 list_add_tail(&new_snap->node, head);
4190 /* Already have this one */
4192 dout(" already present\n");
4194 rbd_assert(snap->size == snap_size);
4195 rbd_assert(!strcmp(snap->name, snap_name));
4196 rbd_assert(snap->features == snap_features);
4198 /* Done with this list entry; advance */
4200 links = links->next;
4203 /* Advance to the next entry in the snapshot context */
4207 dout("%s: done\n", __func__);
4213 * Scan the list of snapshots and register the devices for any that
4214 * have not already been registered.
4216 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
4218 struct rbd_snap *snap;
4221 dout("%s:\n", __func__);
4222 if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
4225 list_for_each_entry(snap, &rbd_dev->snaps, node) {
4226 if (!rbd_snap_registered(snap)) {
4227 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
4232 dout("%s: returning %d\n", __func__, ret);
4237 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4242 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4244 dev = &rbd_dev->dev;
4245 dev->bus = &rbd_bus_type;
4246 dev->type = &rbd_device_type;
4247 dev->parent = &rbd_root_dev;
4248 dev->release = rbd_dev_release;
4249 dev_set_name(dev, "%d", rbd_dev->dev_id);
4250 ret = device_register(dev);
4252 mutex_unlock(&ctl_mutex);
4257 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4259 device_unregister(&rbd_dev->dev);
4262 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4265 * Get a unique rbd identifier for the given new rbd_dev, and add
4266 * the rbd_dev to the global list. The minimum rbd id is 1.
4268 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4270 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4272 spin_lock(&rbd_dev_list_lock);
4273 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4274 spin_unlock(&rbd_dev_list_lock);
4275 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4276 (unsigned long long) rbd_dev->dev_id);
4280 * Remove an rbd_dev from the global list, and record that its
4281 * identifier is no longer in use.
4283 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4285 struct list_head *tmp;
4286 int rbd_id = rbd_dev->dev_id;
4289 rbd_assert(rbd_id > 0);
4291 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4292 (unsigned long long) rbd_dev->dev_id);
4293 spin_lock(&rbd_dev_list_lock);
4294 list_del_init(&rbd_dev->node);
4297 * If the id being "put" is not the current maximum, there
4298 * is nothing special we need to do.
4300 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4301 spin_unlock(&rbd_dev_list_lock);
4306 * We need to update the current maximum id. Search the
4307 * list to find out what it is. We're more likely to find
4308 * the maximum at the end, so search the list backward.
4311 list_for_each_prev(tmp, &rbd_dev_list) {
4312 struct rbd_device *rbd_dev;
4314 rbd_dev = list_entry(tmp, struct rbd_device, node);
4315 if (rbd_dev->dev_id > max_id)
4316 max_id = rbd_dev->dev_id;
4318 spin_unlock(&rbd_dev_list_lock);
4321 * The max id could have been updated by rbd_dev_id_get(), in
4322 * which case it now accurately reflects the new maximum.
4323 * Be careful not to overwrite the maximum value in that
4324 * case.
4326 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4327 dout(" max dev id has been reset\n");
4331 * Skips over white space at *buf, and updates *buf to point to the
4332 * first found non-space character (if any). Returns the length of
4333 * the token (string of non-white space characters) found. Note
4334 * that *buf must be terminated with '\0'.
4336 static inline size_t next_token(const char **buf)
4339 * These are the characters that produce nonzero for
4340 * isspace() in the "C" and "POSIX" locales.
4342 const char *spaces = " \f\n\r\t\v";
4344 *buf += strspn(*buf, spaces); /* Find start of token */
4346 return strcspn(*buf, spaces); /* Return token length */
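/*
 * Example (editorial, hypothetical input): given
 * buf = "  1.2.3.4:6789 name=admin rbd foo", next_token() skips the
 * two leading spaces and returns 12, the length of "1.2.3.4:6789";
 * dup_token() below additionally returns a NUL-terminated copy of
 * the token and advances *buf past it.
 */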
4350 * Finds the next token in *buf, and if the provided token buffer is
4351 * big enough, copies the found token into it. The result, if
4352 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4353 * must be terminated with '\0' on entry.
4355 * Returns the length of the token found (not including the '\0').
4356 * Return value will be 0 if no token is found, and it will be >=
4357 * token_size if the token would not fit.
4359 * The *buf pointer will be updated to point beyond the end of the
4360 * found token. Note that this occurs even if the token buffer is
4361 * too small to hold it.
4363 static inline size_t copy_token(const char **buf,
4369 len = next_token(buf);
4370 if (len < token_size) {
4371 memcpy(token, *buf, len);
4372 *(token + len) = '\0';
4380 * Finds the next token in *buf, dynamically allocates a buffer big
4381 * enough to hold a copy of it, and copies the token into the new
4382 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4383 * that a duplicate buffer is created even for a zero-length token.
4385 * Returns a pointer to the newly-allocated duplicate, or a null
4386 * pointer if memory for the duplicate was not available. If
4387 * the lenp argument is a non-null pointer, the length of the token
4388 * (not including the '\0') is returned in *lenp.
4390 * If successful, the *buf pointer will be updated to point beyond
4391 * the end of the found token.
4393 * Note: uses GFP_KERNEL for allocation.
4395 static inline char *dup_token(const char **buf, size_t *lenp)
4400 len = next_token(buf);
4401 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4404 *(dup + len) = '\0';
4414 * Parse the options provided for an "rbd add" (i.e., rbd image
4415 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4416 * and the data written is passed here via a NUL-terminated buffer.
4417 * Returns 0 if successful or an error code otherwise.
4419 * The information extracted from these options is recorded in
4420 * the other parameters which return dynamically-allocated
4421 * storage:
4422 * ceph_opts
4423 * The address of a pointer that will refer to a ceph options
4424 * structure. Caller must release the returned pointer using
4425 * ceph_destroy_options() when it is no longer needed.
4426 * rbd_opts
4427 * Address of an rbd options pointer. Fully initialized by
4428 * this function; caller must release with kfree().
4429 * spec
4430 * Address of an rbd image specification pointer. Fully
4431 * initialized by this function based on parsed options.
4432 * Caller must release with rbd_spec_put().
4434 * The options passed take this form:
4435 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
4436 * where:
4437 * <mon_addrs>
4438 * A comma-separated list of one or more monitor addresses.
4439 * A monitor address is an ip address, optionally followed
4440 * by a port number (separated by a colon).
4441 * I.e.: ip1[:port1][,ip2[:port2]...]
4442 * <options>
4443 * A comma-separated list of ceph and/or rbd options.
4444 * <pool_name>
4445 * The name of the rados pool containing the rbd image.
4446 * <image_name>
4447 * The name of the image in that pool to map.
4448 * <snap_id>
4449 * An optional snapshot id. If provided, the mapping will
4450 * present data from the image at the time that snapshot was
4451 * created. The image head is used if no snapshot id is
4452 * provided. Snapshot mappings are always read-only.
4454 static int rbd_add_parse_args(const char *buf,
4455 struct ceph_options **ceph_opts,
4456 struct rbd_options **opts,
4457 struct rbd_spec **rbd_spec)
4461 const char *mon_addrs;
4462 size_t mon_addrs_size;
4463 struct rbd_spec *spec = NULL;
4464 struct rbd_options *rbd_opts = NULL;
4465 struct ceph_options *copts;
4468 /* The first four tokens are required */
4470 len = next_token(&buf);
4472 rbd_warn(NULL, "no monitor address(es) provided");
4476 mon_addrs_size = len + 1;
4480 options = dup_token(&buf, NULL);
4484 rbd_warn(NULL, "no options provided");
4488 spec = rbd_spec_alloc();
4492 spec->pool_name = dup_token(&buf, NULL);
4493 if (!spec->pool_name)
4495 if (!*spec->pool_name) {
4496 rbd_warn(NULL, "no pool name provided");
4500 spec->image_name = dup_token(&buf, NULL);
4501 if (!spec->image_name)
4503 if (!*spec->image_name) {
4504 rbd_warn(NULL, "no image name provided");
4509 * Snapshot name is optional; default is to use "-"
4510 * (indicating the head/no snapshot).
4512 len = next_token(&buf);
4514 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4515 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4516 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4517 ret = -ENAMETOOLONG;
4520 spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4521 if (!spec->snap_name)
4523 *(spec->snap_name + len) = '\0';
4525 /* Initialize all rbd options to the defaults */
4527 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4531 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4533 copts = ceph_parse_options(options, mon_addrs,
4534 mon_addrs + mon_addrs_size - 1,
4535 parse_rbd_opts_token, rbd_opts);
4536 if (IS_ERR(copts)) {
4537 ret = PTR_ERR(copts);
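/*
 * Example (editorial): a mapping request written to /sys/bus/rbd/add
 * follows the format parsed above, e.g.
 *
 *	echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" \
 *		> /sys/bus/rbd/add
 *
 * maps the head of image "foo" from pool "rbd".  The option keys
 * shown are illustrative.
 */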
4558 * An rbd format 2 image has a unique identifier, distinct from the
4559 * name given to it by the user. Internally, that identifier is
4560 * what's used to specify the names of objects related to the image.
4562 * A special "rbd id" object is used to map an rbd image name to its
4563 * id. If that object doesn't exist, then there is no v2 rbd image
4564 * with the supplied name.
4566 * This function will record the given rbd_dev's image_id field if
4567 * it can be determined, and in that case will return 0. If any
4568 * errors occur a negative errno will be returned and the rbd_dev's
4569 * image_id field will be unchanged (and should be NULL).
4571 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4579 /* If we already have it we don't need to look it up */
4581 if (rbd_dev->spec->image_id)
4585 * When probing a parent image, the image id is already
4586 * known (and the image name likely is not). There's no
4587 * need to fetch the image id again in this case.
4589 if (rbd_dev->spec->image_id)
4593 * First, see if the format 2 image id file exists, and if
4594 * so, get the image's persistent id from it.
4596 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4597 object_name = kmalloc(size, GFP_NOIO);
4600 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4601 dout("rbd id object name is %s\n", object_name);
4603 /* Response will be an encoded string, which includes a length */
4605 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4606 response = kzalloc(size, GFP_NOIO);
4612 ret = rbd_obj_method_sync(rbd_dev, object_name,
4613 "rbd", "get_id", NULL, 0,
4614 response, RBD_IMAGE_ID_LEN_MAX, NULL);
4615 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4620 rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
4625 if (IS_ERR(rbd_dev->spec->image_id)) {
4626 ret = PTR_ERR(rbd_dev->spec->image_id);
4627 rbd_dev->spec->image_id = NULL;
4629 dout("image_id is %s\n", rbd_dev->spec->image_id);
4638 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
4643 /* Version 1 images have no id; empty string is used */
4645 rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
4646 if (!rbd_dev->spec->image_id)
4649 /* Record the header object name for this rbd image. */
4651 size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
4652 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
4653 if (!rbd_dev->header_name) {
4657 sprintf(rbd_dev->header_name, "%s%s",
4658 rbd_dev->spec->image_name, RBD_SUFFIX);
4660 /* Populate rbd image metadata */
4662 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
4666 /* Version 1 images have no parent (no layering) */
4668 rbd_dev->parent_spec = NULL;
4669 rbd_dev->parent_overlap = 0;
4671 rbd_dev->image_format = 1;
4673 dout("discovered version 1 image, header name is %s\n",
4674 rbd_dev->header_name);
4679 kfree(rbd_dev->header_name);
4680 rbd_dev->header_name = NULL;
4681 kfree(rbd_dev->spec->image_id);
4682 rbd_dev->spec->image_id = NULL;
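/*
 * Example (editorial): for a format 1 image named "foo", the header
 * object constructed above is "foo.rbd", assuming RBD_SUFFIX is
 * ".rbd" as defined in rbd_types.h.
 */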
static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	u64 ver = 0;

	/*
	 * Image id was filled in by the caller.  Record the header
	 * object name for this rbd image.
	 */
	size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;
	sprintf(rbd_dev->header_name, "%s%s",
		RBD_HEADER_PREFIX, rbd_dev->spec->image_id);

	/* Get the size and object order for the image */
	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get the object prefix (a.k.a. block_name) for the image */
	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get and check the features for the image */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* If the image supports layering, get the parent info */
	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* If the image supports fancy striping, get its parameters */
	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* crypto and compression type aren't (yet) supported for v2 images */
	rbd_dev->header.crypt_type = 0;
	rbd_dev->header.comp_type = 0;

	/* Get the snapshot context, plus the header version */
	ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
	if (ret)
		goto out_err;
	rbd_dev->header.obj_version = ver;

	rbd_dev->image_format = 2;

	dout("discovered version 2 image, header name is %s\n",
		rbd_dev->header_name);
	return 0;

out_err:
	rbd_dev->parent_overlap = 0;
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}
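/*
 * Complete the probe of a mapped image: update its snapshot data,
 * allocate a device id and block device, register with sysfs, probe
 * the parent image (if any), and finally announce the disk.
 */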
static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec = NULL;
	struct rbd_client *rbdc = NULL;
	int ret;

	/* no need to lock here, as rbd_dev is not registered yet */
	ret = rbd_dev_snaps_update(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_probe_update_spec(rbd_dev);
	if (ret)
		goto err_out_snaps;

	ret = rbd_dev_set_mapping(rbd_dev);
	if (ret)
		goto err_out_snaps;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */
	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */
	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/*
	 * At this point cleanup in the event of an error is the job
	 * of the sysfs code (initiated by rbd_bus_del_dev()).
	 */
	/* Probe the parent if there is one */
	if (rbd_dev->parent_spec) {
		/*
		 * We need to pass a reference to the client and the
		 * parent spec when creating the parent rbd_dev.
		 * Images related by parent/child relationships
		 * always share both.
		 */
		parent_spec = rbd_spec_get(rbd_dev->parent_spec);
		rbdc = __rbd_get_client(rbd_dev->rbd_client);
		parent = rbd_dev_create(rbdc, parent_spec);
		if (!parent) {
			ret = -ENOMEM;
			goto err_out_spec;
		}
		rbdc = NULL;		/* parent now owns reference */
		parent_spec = NULL;	/* parent now owns reference */
		ret = rbd_dev_probe(parent);
		if (ret < 0)
			goto err_out_parent;
		rbd_dev->parent = parent;
	}

	down_write(&rbd_dev->header_rwsem);
	ret = rbd_dev_snaps_register(rbd_dev);
	up_write(&rbd_dev->header_rwsem);
	if (ret)
		goto err_out_bus;

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto err_out_bus;

	/* Everything's ready.  Announce the disk to the world. */
	add_disk(rbd_dev->disk);
	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);
	return ret;

err_out_parent:
	rbd_dev_destroy(parent);
err_out_spec:
	rbd_spec_put(parent_spec);
	rbd_put_client(rbdc);
err_out_bus:
	/* this will also clean up rest of rbd_dev stuff */
	rbd_bus_del_dev(rbd_dev);
	return ret;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
err_out_snaps:
	rbd_remove_all_snaps(rbd_dev);
	return ret;
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret) {
		dout("probe failed, returning %d\n", ret);
		return ret;
	}

	ret = rbd_dev_probe_finish(rbd_dev);
	if (ret)
		rbd_header_free(&rbd_dev->header);
	return ret;
}
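/*
 * Handler for writes to /sys/bus/rbd/add.  A new mapping is created
 * by writing a line of the form
 *
 *	<mon addrs> <options> <pool name> <image name> [<snap name>]
 *
 * for example (addresses and names illustrative only):
 *
 *	$ echo "1.2.3.4:6789 name=admin rbd foo" > /sys/bus/rbd/add
 */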
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64) rc;

	/* The ceph file layout needs to fit pool id in 32 bits */
	if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	return count;
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);
	return (ssize_t) rc;
}
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}
static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->watch_event)
		rbd_dev_header_watch_sync(rbd_dev, 0);

	/* clean up and free blkdev */
	rbd_free_disk(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);

	/* release allocated disk header fields */
	rbd_header_free(&rbd_dev->header);

	/* done with the id, and with the rbd_dev */
	rbd_dev_id_put(rbd_dev);
	rbd_assert(rbd_dev->rbd_client != NULL);
	rbd_dev_destroy(rbd_dev);

	/* release module ref */
	module_put(THIS_MODULE);
}
static void __rbd_remove(struct rbd_device *rbd_dev)
{
	rbd_remove_all_snaps(rbd_dev);
	rbd_bus_del_dev(rbd_dev);
}
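/*
 * Handler for writes to /sys/bus/rbd/remove.  A mapping is torn down
 * by writing its device id (the N in /dev/rbdN) to the file, e.g.
 *
 *	$ echo 2 > /sys/bus/rbd/remove
 */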
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id, rc;
	unsigned long ul;
	int ret = count;

	rc = strict_strtoul(buf, 10, &ul);
	if (rc)
		return rc;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	while (rbd_dev->parent_spec) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		__rbd_remove(second);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
		first->parent_overlap = 0;
		first->parent = NULL;
	}
	__rbd_remove(rbd_dev);

done:
	mutex_unlock(&ctl_mutex);
	return ret;
}
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;
	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);
	return ret;
}
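/*
 * Registering rbd_bus_type is what exposes the "add" and "remove"
 * control files under /sys/bus/rbd; each mapped image then appears
 * as a device below the rbd root device.
 */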
static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return 0;
}
static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}
module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");