/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, return
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}
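/*
 * Note: the two helpers above implement a saturating counter.  They
 * back rbd_dev->parent_ref below, where the -EINVAL return lets
 * rbd_dev_parent_get()/rbd_dev_parent_put() warn about reference
 * overflow/underflow rather than silently wrapping the count.
 */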
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
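/*
 * Clarifying note: the formula above over-estimates the number of
 * decimal digits in an int.  Each byte contributes log10(256) ~= 2.41
 * digits, so 5/2 = 2.5 digits per byte is a safe ceiling, and the +1
 * leaves room for a sign.  For 4-byte ints this yields 11, enough
 * for "-2147483648".
 */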
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These six fields never change for a given rbd image */
        char *object_prefix;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
        u64 stripe_unit;
        u64 stripe_count;
        u64 features;		/* Might be changeable someday? */

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;	/* format 1 only */
        u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64		pool_id;
        const char	*pool_name;

        const char	*image_id;
        const char	*image_name;

        u64		snap_id;
        const char	*snap_name;

        struct kref	kref;
};
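/*
 * Example (illustrative): mapping "mypool/myimage@mysnap" supplies the
 * three names, and the discovery code then resolves pool_id, image_id
 * and snap_id from them; for a parent image found via layering, the
 * three ids arrive first and the names are looked up instead.
 */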
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client	*client;
        struct kref		kref;
        struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
        OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
        const char		*object_name;
        u64			offset;		/* object start byte */
        u64			length;		/* bytes from offset */
        unsigned long		flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request	*obj_request;	/* STAT op */
                struct {
                        struct rbd_img_request	*img_request;
                        u64			img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head	links;
                };
        };
        u32			which;		/* posn image request list */

        enum obj_request_type	type;
        union {
                struct bio	*bio_list;
                struct {
                        struct page	**pages;
                        u32		page_count;
                };
        };
        struct page		**copyup_pages;
        u32			copyup_page_count;

        struct ceph_osd_request	*osd_req;

        u64			xferred;	/* bytes transferred */
        int			result;

        rbd_obj_callback_t	callback;
        struct completion	completion;

        struct kref		kref;
};
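/*
 * Summary (for orientation) of the three cases described in the
 * comment inside the structure above:
 *
 *   standalone:      img_data = 0, which == BAD_WHICH, obj_request NULL
 *   existence check: img_data = 0, which == BAD_WHICH, obj_request set
 *   image data:      img_data = 1, which < obj_request_count, img_request set
 */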
enum img_req_flags {
        IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
        struct rbd_device	*rbd_dev;
        u64			offset;	/* starting image byte offset */
        u64			length;	/* byte count from offset */
        unsigned long		flags;
        union {
                u64			snap_id;	/* for reads */
                struct ceph_snap_context *snapc;	/* for writes */
        };
        union {
                struct request		*rq;		/* block request */
                struct rbd_obj_request	*obj_request;	/* obj req initiator */
        };
        struct page		**copyup_pages;
        u32			copyup_page_count;
        spinlock_t		completion_lock;/* protects next_completion */
        u32			next_completion;
        rbd_img_callback_t	callback;
        u64			xferred;/* aggregate bytes transferred */
        int			result;	/* first nonzero obj_request result */

        u32			obj_request_count;
        struct list_head	obj_requests;	/* rbd_obj_request structs */

        struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
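/*
 * Note: the _safe variant iterates in *reverse*.  Object requests are
 * torn down from the tail of the list, which preserves the invariant
 * (checked in rbd_img_obj_request_del() below) that the request being
 * removed is always the one whose "which" equals the decremented
 * obj_request_count.
 */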
struct rbd_mapping {
        u64                     size;
        u64                     features;
        bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int			dev_id;		/* blkdev unique id */

        int			major;		/* blkdev assigned major */
        struct gendisk		*disk;		/* blkdev's gendisk and rq */

        u32			image_format;	/* Either 1 or 2 */
        struct rbd_client	*rbd_client;

        char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t		lock;		/* queue, flags, open_count */

        struct rbd_image_header	header;
        unsigned long		flags;		/* possibly lock protected */
        struct rbd_spec		*spec;

        char			*header_name;

        struct ceph_file_layout	layout;

        struct ceph_osd_event   *watch_event;
        struct rbd_obj_request	*watch_request;

        struct rbd_spec		*parent_spec;
        u64			parent_overlap;
        atomic_t		parent_ref;
        struct rbd_device	*parent;

        /* protects updating the header */
        struct rw_semaphore     header_rwsem;

        struct rbd_mapping	mapping;

        struct list_head	node;

        /* sysfs related */
        struct device		dev;
        unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
        __ATTR_NULL
};

static struct bus_type rbd_bus_type = {
        .name		= "rbd",
        .bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else	/* punt */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        (void) get_device(&rbd_dev->dev);
        set_device_ro(bdev, rbd_dev->mapping.read_only);
        mutex_unlock(&ctl_mutex);

        return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        put_device(&rbd_dev->dev);
        mutex_unlock(&ctl_mutex);

        return 0;
}

static const struct block_device_operations rbd_bd_ops = {
        .owner			= THIS_MODULE,
        .open			= rbd_open,
        .release		= rbd_release,
};
/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_mutex;
        ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_err;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        mutex_unlock(&ctl_mutex);
        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;

out_err:
        ceph_destroy_client(rbdc->client);
out_mutex:
        mutex_unlock(&ctl_mutex);
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        /* Boolean args above */
        Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},		/* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},		/* Alternate spelling */
        /* Boolean args above */
        {-1, NULL}
};

struct rbd_options {
        bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else if (token > Opt_last_string && token < Opt_last_bool) {
                dout("got Boolean token %d\n", token);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                rbd_assert(false);
                break;
        }
        return 0;
}
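/*
 * Example (illustrative): a mapping created roughly like
 *
 *   $ echo "1.2.3.4:6789 name=admin,ro mypool myimage" > /sys/bus/rbd/add
 *
 * would hand the "ro" token to parse_rbd_opts_token(), which sets
 * rbd_opts->read_only = true.  See Documentation/ABI/testing/sysfs-bus-rbd
 * for the authoritative argument format.
 */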
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)	/* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);

        return rbdc;
}

/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to unlink
 * the client from the list.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        size_t size;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                size_t len;

                len = strnlen(ondisk->object_prefix,
                                sizeof (ondisk->object_prefix));
                object_prefix = kmalloc(len + 1, GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
                memcpy(object_prefix, ondisk->object_prefix, len);
                object_prefix[len] = '\0';
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */

                size = snap_count * sizeof (*header->snap_sizes);
                snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_info() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more, fill in the header */

        down_write(&rbd_dev->header_rwsem);
        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                header->crypt_type = ondisk->options.crypt_type;
                header->comp_type = ondisk->options.comp_type;
                /* The rest aren't used for format 1 images */
                header->stripe_unit = 0;
                header->stripe_count = 0;
                header->features = 0;
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        /* Make sure mapping size is consistent with header info */

        if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
                if (rbd_dev->mapping.size != header->image_size)
                        rbd_dev->mapping.size = header->image_size;

        up_write(&rbd_dev->header_rwsem);

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
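/*
 * Worked example (illustrative): for snaps[] = { 9, 5, 2 } (descending,
 * as the osd keeps it), bsearch() with snapid_compare_reverse() finds
 * snap_id 5 at index 1, while snap_id 4 is absent and yields
 * BAD_SNAP_INDEX.
 */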
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        u32 which;

        which = rbd_dev_snap_index(rbd_dev, snap_id);
        if (which == BAD_SNAP_INDEX)
                return NULL;

        return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_snap_name(rbd_dev, snap_id);

        return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                        u64 *snap_size)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_size = rbd_dev->header.image_size;
        } else if (rbd_dev->image_format == 1) {
                u32 which;

                which = rbd_dev_snap_index(rbd_dev, snap_id);
                if (which == BAD_SNAP_INDEX)
                        return -ENOENT;

                *snap_size = rbd_dev->header.snap_sizes[which];
        } else {
                u64 size = 0;
                int ret;

                ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
                if (ret)
                        return ret;

                *snap_size = size;
        }
        return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                        u64 *snap_features)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_features = rbd_dev->header.features;
        } else if (rbd_dev->image_format == 1) {
                *snap_features = 0;	/* No features for format 1 */
        } else {
                u64 features = 0;
                int ret;

                ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
                if (ret)
                        return ret;

                *snap_features = features;
        }
        return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;

        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
        ret = rbd_snap_features(rbd_dev, snap_id, &features);
        if (ret)
                return ret;

        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;

        return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
        char *name;
        u64 segment;
        int ret;

        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
        ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
                pr_err("error formatting segment name for #%llu (%d)\n",
                        segment, ret);
                kmem_cache_free(rbd_segment_name_cache, name);
                name = NULL;
        }

        return name;
}
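/*
 * Example (illustrative): with object_prefix "rb.0.1234.5678" and
 * obj_order 22 (4 MiB objects), image offset 0x1400000 (20 MiB) lies
 * in segment 5, producing the object name "rb.0.1234.5678.000000000005".
 */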
static void rbd_segment_name_free(const char *name)
{
        /* The explicit cast here is needed to drop the const qualifier */

        kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        return offset & (segment_size - 1);
}
static u64 rbd_segment_length(struct rbd_device *rbd_dev,
                                u64 offset, u64 length)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        offset &= segment_size - 1;

        rbd_assert(length <= U64_MAX - offset);
        if (offset + length > segment_size)
                length = segment_size - offset;

        return length;
}
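/*
 * Worked example (illustrative): with 4 MiB segments, a request at
 * image offset 5 MiB has offset-within-segment 1 MiB.  A 3 MiB length
 * fits (1 + 3 = 4 MiB) and is returned unchanged, while a 4 MiB length
 * would be clamped to 3 MiB, the remainder of that segment.
 */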
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
        struct bio *tmp;

        while (chain) {
                tmp = chain;
                chain = chain->bi_next;
                bio_put(tmp);
        }
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
        struct bio_vec *bv;
        unsigned long flags;
        void *buf;
        int i;
        int pos = 0;

        while (chain) {
                bio_for_each_segment(bv, chain, i) {
                        if (pos + bv->bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
                                buf = bvec_kmap_irq(bv, &flags);
                                memset(buf + remainder, 0,
                                       bv->bv_len - remainder);
                                bvec_kunmap_irq(buf, &flags);
                        }
                        pos += bv->bv_len;
                }

                chain = chain->bi_next;
        }
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
        struct page **page = &pages[offset >> PAGE_SHIFT];

        rbd_assert(end > offset);
        rbd_assert(end - offset <= (u64)SIZE_MAX);
        while (offset < end) {
                size_t page_offset;
                size_t length;
                unsigned long flags;
                void *kaddr;

                page_offset = (size_t)(offset & ~PAGE_MASK);
                length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
                local_irq_save(flags);
                kaddr = kmap_atomic(*page);
                memset(kaddr + page_offset, 0, length);
                kunmap_atomic(kaddr);
                local_irq_restore(flags);

                offset += length;
                page++;
        }
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio_vec *bv;
        unsigned int resid;
        unsigned short idx;
        unsigned int voff;
        unsigned short end_idx;
        unsigned short vcnt;
        struct bio *bio;

        /* Handle the easy case for the caller */

        if (!offset && len == bio_src->bi_size)
                return bio_clone(bio_src, gfpmask);

        if (WARN_ON_ONCE(!len))
                return NULL;
        if (WARN_ON_ONCE(len > bio_src->bi_size))
                return NULL;
        if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
                return NULL;

        /* Find first affected segment... */

        resid = offset;
        __bio_for_each_segment(bv, bio_src, idx, 0) {
                if (resid < bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        voff = resid;

        /* ...and the last affected segment */

        resid += len;
        __bio_for_each_segment(bv, bio_src, end_idx, idx) {
                if (resid <= bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        vcnt = end_idx - idx + 1;

        /* Build the clone */

        bio = bio_alloc(gfpmask, (unsigned int) vcnt);
        if (!bio)
                return NULL;	/* ENOMEM */

        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_flags |= 1 << BIO_CLONED;

        /*
         * Copy over our part of the bio_vec, then update the first
         * and last (or only) entries.
         */
        memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
                        vcnt * sizeof (struct bio_vec));
        bio->bi_io_vec[0].bv_offset += voff;
        if (vcnt > 1) {
                bio->bi_io_vec[0].bv_len -= voff;
                bio->bi_io_vec[vcnt - 1].bv_len = resid;
        } else {
                bio->bi_io_vec[0].bv_len = len;
        }

        bio->bi_vcnt = vcnt;
        bio->bi_size = len;
        bio->bi_idx = 0;

        return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
                                        unsigned int *offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bi = *bio_src;
        unsigned int off = *offset;
        struct bio *chain = NULL;
        struct bio **end;

        /* Build up a chain of clone bios up to the limit */

        if (!bi || off >= bi->bi_size || !len)
                return NULL;		/* Nothing to clone */

        end = &chain;
        while (len) {
                unsigned int bi_size;
                struct bio *bio;

                if (!bi) {
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;	/* EINVAL; ran out of bio's */
                }
                bi_size = min_t(unsigned int, bi->bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;	/* ENOMEM */

                *end = bio;
                end = &bio->bi_next;

                off += bi_size;
                if (off == bi->bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
                len -= bi_size;
        }
        *bio_src = bi;
        *offset = off;

        return chain;
out_err:
        bio_chain_put(chain);

        return NULL;
}
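/*
 * Usage note (illustrative): rbd_img_request_fill() below walks an
 * incoming bio chain with this helper, peeling off one object's worth
 * of data at a time; bio_src/offset advance across calls so each
 * object request clones only its own byte range.
 */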
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
                struct rbd_device *rbd_dev;

                rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
                        obj_request);
        }
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
                struct rbd_device *rbd_dev = NULL;

                if (obj_request_img_data_test(obj_request))
                        rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked done\n",
                        obj_request);
        }
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
                                bool exists)
{
        if (exists)
                set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
        set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
        smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        if (img_request_child_test(img_request))
                kref_put(&img_request->kref, rbd_parent_request_destroy);
        else
                kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->img_request == NULL);

        /* Image request now owns object's original reference */
        obj_request->img_request = img_request;
        obj_request->which = img_request->obj_request_count;
        rbd_assert(!obj_request_img_data_test(obj_request));
        obj_request_img_data_set(obj_request);
        rbd_assert(obj_request->which != BAD_WHICH);
        img_request->obj_request_count++;
        list_add_tail(&obj_request->links, &img_request->obj_requests);
        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->which != BAD_WHICH);

        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
        list_del(&obj_request->links);
        rbd_assert(img_request->obj_request_count > 0);
        img_request->obj_request_count--;
        rbd_assert(obj_request->which == img_request->obj_request_count);
        obj_request->which = BAD_WHICH;
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request == img_request);
        obj_request->img_request = NULL;
        obj_request->callback = NULL;
        rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
        switch (type) {
        case OBJ_REQUEST_NODATA:
        case OBJ_REQUEST_BIO:
        case OBJ_REQUEST_PAGES:
                return true;
        default:
                return false;
        }
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
                                struct rbd_obj_request *obj_request)
{
        dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

        return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
        dout("%s: img %p\n", __func__, img_request);

        /*
         * If no error occurred, compute the aggregate transfer
         * count for the image request.  We could instead use
         * atomic64_cmpxchg() to update it as each object request
         * completes; not clear which way is better off hand.
         */
        if (!img_request->result) {
                struct rbd_obj_request *obj_request;
                u64 xferred = 0;

                for_each_obj_request(img_request, obj_request)
                        xferred += obj_request->xferred;
                img_request->xferred = xferred;
        }

        if (img_request->callback)
                img_request->callback(img_request);
        else
                rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);

        return wait_for_completion_interruptible(&obj_request->completion);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_WRITE, &img_request->flags);
        smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
        u64 xferred = obj_request->xferred;
        u64 length = obj_request->length;

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, obj_request->img_request, obj_request->result,
                xferred, length);
        /*
         * ENOENT means a hole in the image.  We zero-fill the
         * entire length of the request.  A short read also implies
         * zero-fill to the end of the request.  Either way we
         * update the xferred count to indicate the whole request
         * was satisfied.
         */
        rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
        if (obj_request->result == -ENOENT) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, 0);
                else
                        zero_pages(obj_request->pages, 0, length);
                obj_request->result = 0;
                obj_request->xferred = length;
        } else if (xferred < length && !obj_request->result) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, xferred);
                else
                        zero_pages(obj_request->pages, xferred, length);
                obj_request->xferred = length;
        }
        obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p cb %p\n", __func__, obj_request,
                obj_request->callback);
        if (obj_request->callback)
                obj_request->callback(obj_request);
        else
                complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = NULL;
        struct rbd_device *rbd_dev = NULL;
        bool layered = false;

        if (obj_request_img_data_test(obj_request)) {
                img_request = obj_request->img_request;
                layered = img_request && img_request_layered_test(img_request);
                rbd_dev = img_request->rbd_dev;
        }

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, img_request, obj_request->result,
                obj_request->xferred, obj_request->length);
        if (layered && obj_request->result == -ENOENT &&
                        obj_request->img_offset < rbd_dev->parent_overlap)
                rbd_img_parent_read(obj_request);
        else if (img_request)
                rbd_img_obj_request_read_callback(obj_request);
        else
                obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p result %d %llu\n", __func__, obj_request,
                obj_request->result, obj_request->length);
        /*
         * There is no such thing as a successful short write.  Set
         * it to our originally-requested length.
         */
        obj_request->xferred = obj_request->length;
        obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
{
        struct rbd_obj_request *obj_request = osd_req->r_priv;
        u16 opcode;

        dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
        rbd_assert(osd_req == obj_request->osd_req);
        if (obj_request_img_data_test(obj_request)) {
                rbd_assert(obj_request->img_request);
                rbd_assert(obj_request->which != BAD_WHICH);
        } else {
                rbd_assert(obj_request->which == BAD_WHICH);
        }

        if (osd_req->r_result < 0)
                obj_request->result = osd_req->r_result;

        BUG_ON(osd_req->r_num_ops > 2);

        /*
         * We support a 64-bit length, but ultimately it has to be
         * passed to blk_end_request(), which takes an unsigned int.
         */
        obj_request->xferred = osd_req->r_reply_op_len[0];
        rbd_assert(obj_request->xferred < (u64)UINT_MAX);
        opcode = osd_req->r_ops[0].op;
        switch (opcode) {
        case CEPH_OSD_OP_READ:
                rbd_osd_read_callback(obj_request);
                break;
        case CEPH_OSD_OP_WRITE:
                rbd_osd_write_callback(obj_request);
                break;
        case CEPH_OSD_OP_STAT:
                rbd_osd_stat_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
                break;
        default:
                rbd_warn(NULL, "%s: unsupported op %hu\n",
                        obj_request->object_name, (unsigned short) opcode);
                break;
        }

        if (obj_request_done_test(obj_request))
                rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        u64 snap_id;

        rbd_assert(osd_req != NULL);

        snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        struct ceph_snap_context *snapc;
        struct timespec mtime = CURRENT_TIME;

        rbd_assert(osd_req != NULL);

        snapc = img_request ? img_request->snapc : NULL;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
                                        struct rbd_device *rbd_dev,
                                        bool write_request,
                                        struct rbd_obj_request *obj_request)
{
        struct ceph_snap_context *snapc = NULL;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        if (obj_request_img_data_test(obj_request)) {
                struct rbd_img_request *img_request = obj_request->img_request;

                rbd_assert(write_request ==
                                img_request_write_test(img_request));
                if (write_request)
                        snapc = img_request->snapc;
        }

        /* Allocate and initialize the request, for the single op */

        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;	/* ENOMEM */

        if (write_request)
                osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        else
                osd_req->r_flags = CEPH_OSD_FLAG_READ;

        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_oid_len = strlen(obj_request->object_name);
        rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
        memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

        osd_req->r_file_layout = rbd_dev->layout;	/* struct */

        return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
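/*
 * Background note (illustrative): copyup is how layered writes detach
 * an object from the parent image.  Op 0 invokes the "copyup" class
 * method with the parent's data for the whole object; op 1 then
 * applies the original write on top.  The osd executes both ops of
 * one request as a unit, so the child object materializes fully
 * populated.
 */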
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        struct ceph_snap_context *snapc;
        struct rbd_device *rbd_dev;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;
        rbd_assert(img_request);
        rbd_assert(img_request_write_test(img_request));

        /* Allocate and initialize the request, for the two ops */

        snapc = img_request->snapc;
        rbd_dev = img_request->rbd_dev;
        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;	/* ENOMEM */

        osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_oid_len = strlen(obj_request->object_name);
        rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
        memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

        osd_req->r_file_layout = rbd_dev->layout;	/* struct */

        return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
        ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
                                                u64 offset, u64 length,
                                                enum obj_request_type type)
{
        struct rbd_obj_request *obj_request;
        size_t size;
        char *name;

        rbd_assert(obj_request_type_valid(type));

        size = strlen(object_name) + 1;
        name = kmalloc(size, GFP_KERNEL);
        if (!name)
                return NULL;

        obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
        if (!obj_request) {
                kfree(name);
                return NULL;
        }

        obj_request->object_name = memcpy(name, object_name, size);
        obj_request->offset = offset;
        obj_request->length = length;
        obj_request->flags = 0;
        obj_request->which = BAD_WHICH;
        obj_request->type = type;
        INIT_LIST_HEAD(&obj_request->links);
        init_completion(&obj_request->completion);
        kref_init(&obj_request->kref);

        dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
                offset, length, (int)type, obj_request);

        return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
        struct rbd_obj_request *obj_request;

        obj_request = container_of(kref, struct rbd_obj_request, kref);

        dout("%s: obj %p\n", __func__, obj_request);

        rbd_assert(obj_request->img_request == NULL);
        rbd_assert(obj_request->which == BAD_WHICH);

        if (obj_request->osd_req)
                rbd_osd_req_destroy(obj_request->osd_req);

        rbd_assert(obj_request_type_valid(obj_request->type));
        switch (obj_request->type) {
        case OBJ_REQUEST_NODATA:
                break;		/* Nothing to do */
        case OBJ_REQUEST_BIO:
                if (obj_request->bio_list)
                        bio_chain_put(obj_request->bio_list);
                break;
        case OBJ_REQUEST_PAGES:
                if (obj_request->pages)
                        ceph_release_page_vector(obj_request->pages,
                                                obj_request->page_count);
                break;
        }

        kfree(obj_request->object_name);
        obj_request->object_name = NULL;
        kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
        rbd_dev_remove_parent(rbd_dev);
        rbd_spec_put(rbd_dev->parent_spec);
        rbd_dev->parent_spec = NULL;
        rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return;

        counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
        if (counter > 0)
                return;

        /* Last reference; clean up parent data structures */

        if (!counter)
                rbd_dev_unparent(rbd_dev);
        else
                rbd_warn(rbd_dev, "parent reference underflow\n");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return false;

        counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
        if (counter > 0 && rbd_dev->parent_overlap)
                return true;

        /* Image was flattened, but parent is not yet torn down */

        if (counter < 0)
                rbd_warn(rbd_dev, "parent reference overflow\n");

        return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
                                        struct rbd_device *rbd_dev,
                                        u64 offset, u64 length,
                                        bool write_request)
{
        struct rbd_img_request *img_request;

        img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
        if (!img_request)
                return NULL;

        if (write_request) {
                down_read(&rbd_dev->header_rwsem);
                ceph_get_snap_context(rbd_dev->header.snapc);
                up_read(&rbd_dev->header_rwsem);
        }

        img_request->rq = NULL;
        img_request->rbd_dev = rbd_dev;
        img_request->offset = offset;
        img_request->length = length;
        img_request->flags = 0;
        if (write_request) {
                img_request_write_set(img_request);
                img_request->snapc = rbd_dev->header.snapc;
        } else {
                img_request->snap_id = rbd_dev->spec->snap_id;
        }
        if (rbd_dev_parent_get(rbd_dev))
                img_request_layered_set(img_request);
        spin_lock_init(&img_request->completion_lock);
        img_request->next_completion = 0;
        img_request->callback = NULL;
        img_request->result = 0;
        img_request->obj_request_count = 0;
        INIT_LIST_HEAD(&img_request->obj_requests);
        kref_init(&img_request->kref);

        dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
                write_request ? "write" : "read", offset, length,
                img_request);

        return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
        struct rbd_img_request *img_request;
        struct rbd_obj_request *obj_request;
        struct rbd_obj_request *next_obj_request;

        img_request = container_of(kref, struct rbd_img_request, kref);

        dout("%s: img %p\n", __func__, img_request);

        for_each_obj_request_safe(img_request, obj_request, next_obj_request)
                rbd_img_obj_request_del(img_request, obj_request);
        rbd_assert(img_request->obj_request_count == 0);

        if (img_request_layered_test(img_request)) {
                img_request_layered_clear(img_request);
                rbd_dev_parent_put(img_request->rbd_dev);
        }

        if (img_request_write_test(img_request))
                ceph_put_snap_context(img_request->snapc);

        kmem_cache_free(rbd_img_request_cache, img_request);
}

static struct rbd_img_request *rbd_parent_request_create(
                                        struct rbd_obj_request *obj_request,
                                        u64 img_offset, u64 length)
{
        struct rbd_img_request *parent_request;
        struct rbd_device *rbd_dev;

        rbd_assert(obj_request->img_request);
        rbd_dev = obj_request->img_request->rbd_dev;

        parent_request = rbd_img_request_create(rbd_dev->parent,
                                                img_offset, length, false);
        if (!parent_request)
                return NULL;

        img_request_child_set(parent_request);
        rbd_obj_request_get(obj_request);
        parent_request->obj_request = obj_request;

        return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
        struct rbd_img_request *parent_request;
        struct rbd_obj_request *orig_request;

        parent_request = container_of(kref, struct rbd_img_request, kref);
        orig_request = parent_request->obj_request;

        parent_request->obj_request = NULL;
        rbd_obj_request_put(orig_request);
        img_request_child_clear(parent_request);

        rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        unsigned int xferred;
        int result;
        bool more;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
        xferred = (unsigned int)obj_request->xferred;
        result = obj_request->result;
        if (result) {
                struct rbd_device *rbd_dev = img_request->rbd_dev;

                rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
                        img_request_write_test(img_request) ? "write" : "read",
                        obj_request->length, obj_request->img_offset,
                        obj_request->offset);
                rbd_warn(rbd_dev, "  result %d xferred %x\n",
                        result, xferred);
                if (!img_request->result)
                        img_request->result = result;
        }

        /* Image object requests don't own their page array */

        if (obj_request->type == OBJ_REQUEST_PAGES) {
                obj_request->pages = NULL;
                obj_request->page_count = 0;
        }

        if (img_request_child_test(img_request)) {
                rbd_assert(img_request->obj_request != NULL);
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
                rbd_assert(img_request->rq != NULL);
                more = blk_end_request(img_request->rq, result, xferred);
        }

        return more;
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        u32 which = obj_request->which;
        bool more = true;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
        rbd_assert(img_request != NULL);
        rbd_assert(img_request->obj_request_count > 0);
        rbd_assert(which != BAD_WHICH);
        rbd_assert(which < img_request->obj_request_count);
        rbd_assert(which >= img_request->next_completion);

        spin_lock_irq(&img_request->completion_lock);
        if (which != img_request->next_completion)
                goto out;

        for_each_obj_request_from(img_request, obj_request) {
                rbd_assert(more);
                rbd_assert(which < img_request->obj_request_count);

                if (!obj_request_done_test(obj_request))
                        break;
                more = rbd_img_obj_end_request(obj_request);
                which++;
        }

        rbd_assert(more ^ (which == img_request->obj_request_count));
        img_request->next_completion = which;
out:
        spin_unlock_irq(&img_request->completion_lock);

        if (!more)
                rbd_img_request_complete(img_request);
}
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
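/*
 * Worked example (illustrative): with 4 MiB objects, a 7 MiB read at
 * image offset 2 MiB becomes three object requests: 2 MiB from the
 * tail of object 0, all 4 MiB of object 1, and the first 1 MiB of
 * object 2.  Each request clones its slice of the incoming bio chain
 * via bio_chain_clone_range().
 */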
static int rbd_img_request_fill(struct rbd_img_request *img_request,
                                        enum obj_request_type type,
                                        void *data_desc)
{
        struct rbd_device *rbd_dev = img_request->rbd_dev;
        struct rbd_obj_request *obj_request = NULL;
        struct rbd_obj_request *next_obj_request;
        bool write_request = img_request_write_test(img_request);
        struct bio *bio_list = NULL;
        unsigned int bio_offset = 0;
        struct page **pages = NULL;
        u64 img_offset;
        u64 resid;
        u16 opcode;

        dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
                (int)type, data_desc);

        opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
        img_offset = img_request->offset;
        resid = img_request->length;
        rbd_assert(resid > 0);

        if (type == OBJ_REQUEST_BIO) {
                bio_list = data_desc;
                rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
        } else {
                rbd_assert(type == OBJ_REQUEST_PAGES);
                pages = data_desc;
        }

        while (resid) {
                struct ceph_osd_request *osd_req;
                const char *object_name;
                u64 offset;
                u64 length;

                object_name = rbd_segment_name(rbd_dev, img_offset);
                if (!object_name)
                        goto out_unwind;
                offset = rbd_segment_offset(rbd_dev, img_offset);
                length = rbd_segment_length(rbd_dev, img_offset, resid);
                obj_request = rbd_obj_request_create(object_name,
                                                offset, length, type);
                /* object request has its own copy of the object name */
                rbd_segment_name_free(object_name);
                if (!obj_request)
                        goto out_unwind;

                if (type == OBJ_REQUEST_BIO) {
                        unsigned int clone_size;

                        rbd_assert(length <= (u64)UINT_MAX);
                        clone_size = (unsigned int)length;
                        obj_request->bio_list =
                                        bio_chain_clone_range(&bio_list,
                                                                &bio_offset,
                                                                clone_size,
                                                                GFP_ATOMIC);
                        if (!obj_request->bio_list)
                                goto out_partial;
                } else {
                        unsigned int page_count;

                        obj_request->pages = pages;
                        page_count = (u32)calc_pages_for(offset, length);
                        obj_request->page_count = page_count;
                        if ((offset + length) & ~PAGE_MASK)
                                page_count--;	/* more on last page */
                        pages += page_count;
                }

                osd_req = rbd_osd_req_create(rbd_dev, write_request,
                                                obj_request);
                if (!osd_req)
                        goto out_partial;
                obj_request->osd_req = osd_req;
                obj_request->callback = rbd_img_obj_callback;

                osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
                                                0, 0);
                if (type == OBJ_REQUEST_BIO)
                        osd_req_op_extent_osd_data_bio(osd_req, 0,
                                        obj_request->bio_list, length);
                else
                        osd_req_op_extent_osd_data_pages(osd_req, 0,
                                        obj_request->pages, length,
                                        offset & ~PAGE_MASK, false, false);

                if (write_request)
                        rbd_osd_req_format_write(obj_request);
                else
                        rbd_osd_req_format_read(obj_request);

                obj_request->img_offset = img_offset;
                rbd_img_obj_request_add(img_request, obj_request);

                img_offset += length;
                resid -= length;
        }

        return 0;

out_partial:
        rbd_obj_request_put(obj_request);
out_unwind:
        for_each_obj_request_safe(img_request, obj_request, next_obj_request)
                rbd_obj_request_put(obj_request);

        return -ENOMEM;
}
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
        struct page **pages;
        u32 page_count;

        rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;
        rbd_assert(img_request);

        rbd_dev = img_request->rbd_dev;
        rbd_assert(rbd_dev);

        pages = obj_request->copyup_pages;
        rbd_assert(pages != NULL);
        obj_request->copyup_pages = NULL;
        page_count = obj_request->copyup_page_count;
        rbd_assert(page_count);
        obj_request->copyup_page_count = 0;
        ceph_release_page_vector(pages, page_count);

        /*
         * We want the transfer count to reflect the size of the
         * original write request.  There is no such thing as a
         * successful short write, so if the request was successful
         * we can just set it to the originally-requested length.
         */
        if (!obj_request->result)
                obj_request->xferred = obj_request->length;

        /* Finish up with the normal image object callback */

        rbd_img_obj_callback(obj_request);
}
2314 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2316 struct rbd_obj_request *orig_request;
2317 struct ceph_osd_request *osd_req;
2318 struct ceph_osd_client *osdc;
2319 struct rbd_device *rbd_dev;
2320 struct page **pages;
2327 rbd_assert(img_request_child_test(img_request));
2329 /* First get what we need from the image request */
2331 pages = img_request->copyup_pages;
2332 rbd_assert(pages != NULL);
2333 img_request->copyup_pages = NULL;
2334 page_count = img_request->copyup_page_count;
2335 rbd_assert(page_count);
2336 img_request->copyup_page_count = 0;
2338 orig_request = img_request->obj_request;
2339 rbd_assert(orig_request != NULL);
2340 rbd_assert(obj_request_type_valid(orig_request->type));
2341 img_result = img_request->result;
2342 parent_length = img_request->length;
2343 rbd_assert(parent_length == img_request->xferred);
2344 rbd_img_request_put(img_request);
2346 rbd_assert(orig_request->img_request);
2347 rbd_dev = orig_request->img_request->rbd_dev;
2348 rbd_assert(rbd_dev);
2351 * If the overlap has become 0 (most likely because the
2352 * image has been flattened) we need to free the pages
2353 * and re-submit the original write request.
2355 if (!rbd_dev->parent_overlap) {
2356 struct ceph_osd_client *osdc;
2358 ceph_release_page_vector(pages, page_count);
2359 osdc = &rbd_dev->rbd_client->client->osdc;
2360 img_result = rbd_obj_request_submit(osdc, orig_request);
2369 * The original osd request is of no use to us any more.
2370 * We need a new one that can hold the two ops in a copyup
2371 * request. Allocate the new copyup osd request for the
2372 * original request, and release the old one.
2374 img_result = -ENOMEM;
2375 osd_req = rbd_osd_req_create_copyup(orig_request);
2378 rbd_osd_req_destroy(orig_request->osd_req);
2379 orig_request->osd_req = osd_req;
2380 orig_request->copyup_pages = pages;
2381 orig_request->copyup_page_count = page_count;
2383 /* Initialize the copyup op */
2385 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2386 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2389 /* Then the original write request op */
2391 offset = orig_request->offset;
2392 length = orig_request->length;
2393 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2394 offset, length, 0, 0);
2395 if (orig_request->type == OBJ_REQUEST_BIO)
2396 osd_req_op_extent_osd_data_bio(osd_req, 1,
2397 orig_request->bio_list, length);
2399 osd_req_op_extent_osd_data_pages(osd_req, 1,
2400 orig_request->pages, length,
2401 offset & ~PAGE_MASK, false, false);
2403 rbd_osd_req_format_write(orig_request);
2405 /* All set, send it off. */
2407 orig_request->callback = rbd_img_obj_copyup_callback;
2408 osdc = &rbd_dev->rbd_client->client->osdc;
2409 img_result = rbd_obj_request_submit(osdc, orig_request);
2413 /* Record the error code and complete the request */
2415 orig_request->result = img_result;
2416 orig_request->xferred = 0;
2417 obj_request_done_set(orig_request);
2418 rbd_obj_request_complete(orig_request);
2422 * Read from the parent image the range of data that covers the
2423 * entire target of the given object request. This is used for
2424 * satisfying a layered image write request when the target of an
2425 * object request from the image request does not exist.
2427 * A page array big enough to hold the returned data is allocated
2428 * and supplied to rbd_img_request_fill() as the "data descriptor."
2429 * When the read completes, this page array will be transferred to
2430 * the original object request for the copyup operation.
2432 * If an error occurs, record it as the result of the original
2433 * object request and mark it done so it gets completed.
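 *
 * Rough sketch of the flow implemented below together with
 * rbd_img_obj_parent_read_full_callback() above:
 *   1. read the covered range from the parent into a page vector;
 *   2. build a two-op osd request: a "copyup" class method call
 *      carrying the parent data, followed by the original write op;
 *   3. submit that request to the target object.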
2435 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2437 struct rbd_img_request *img_request = NULL;
2438 struct rbd_img_request *parent_request = NULL;
2439 struct rbd_device *rbd_dev;
2442 struct page **pages = NULL;
2446 rbd_assert(obj_request_img_data_test(obj_request));
2447 rbd_assert(obj_request_type_valid(obj_request->type));
2449 img_request = obj_request->img_request;
2450 rbd_assert(img_request != NULL);
2451 rbd_dev = img_request->rbd_dev;
2452 rbd_assert(rbd_dev->parent != NULL);
2455 * Determine the byte range covered by the object in the
2456 * child image to which the original request was to be sent.
2458 img_offset = obj_request->img_offset - obj_request->offset;
2459 length = (u64)1 << rbd_dev->header.obj_order;
2462 * There is no defined parent data beyond the parent
2463 * overlap, so limit what we read at that boundary if necessary.
2466 if (img_offset + length > rbd_dev->parent_overlap) {
2467 rbd_assert(img_offset < rbd_dev->parent_overlap);
2468 length = rbd_dev->parent_overlap - img_offset;
2472 * Allocate a page array big enough to receive the data read
2475 page_count = (u32)calc_pages_for(0, length);
2476 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2477 if (IS_ERR(pages)) {
2478 result = PTR_ERR(pages);
2484 parent_request = rbd_parent_request_create(obj_request,
2485 img_offset, length);
2486 if (!parent_request)
2489 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2492 parent_request->copyup_pages = pages;
2493 parent_request->copyup_page_count = page_count;
2495 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2496 result = rbd_img_request_submit(parent_request);
2500 parent_request->copyup_pages = NULL;
2501 parent_request->copyup_page_count = 0;
2502 parent_request->obj_request = NULL;
2503 rbd_obj_request_put(obj_request);
2506 ceph_release_page_vector(pages, page_count);
2508 rbd_img_request_put(parent_request);
2509 obj_request->result = result;
2510 obj_request->xferred = 0;
2511 obj_request_done_set(obj_request);
2516 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2518 struct rbd_obj_request *orig_request;
2519 struct rbd_device *rbd_dev;
2522 rbd_assert(!obj_request_img_data_test(obj_request));
2525 * All we need from the object request is the original
2526 * request and the result of the STAT op. Grab those, then
2527 * we're done with the request.
2529 orig_request = obj_request->obj_request;
2530 obj_request->obj_request = NULL;
2531 rbd_assert(orig_request);
2532 rbd_assert(orig_request->img_request);
2534 result = obj_request->result;
2535 obj_request->result = 0;
2537 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2538 obj_request, orig_request, result,
2539 obj_request->xferred, obj_request->length);
2540 rbd_obj_request_put(obj_request);
2543 * If the overlap has become 0 (most likely because the
2544 * image has been flattened) we need to free the pages
2545 * and re-submit the original write request.
2547 rbd_dev = orig_request->img_request->rbd_dev;
2548 if (!rbd_dev->parent_overlap) {
2549 struct ceph_osd_client *osdc;
2551 rbd_obj_request_put(orig_request);
2552 osdc = &rbd_dev->rbd_client->client->osdc;
2553 result = rbd_obj_request_submit(osdc, orig_request);
2559 * Our only purpose here is to determine whether the object
2560 * exists, and we don't want to treat the non-existence as
2561 * an error. If something else comes back, transfer the
2562 * error to the original request and complete it now.
2565 obj_request_existence_set(orig_request, true);
2566 } else if (result == -ENOENT) {
2567 obj_request_existence_set(orig_request, false);
2568 } else if (result) {
2569 orig_request->result = result;
2574 * Resubmit the original request now that we have recorded
2575 * whether the target object exists.
2577 orig_request->result = rbd_img_obj_request_submit(orig_request);
2579 if (orig_request->result)
2580 rbd_obj_request_complete(orig_request);
2581 rbd_obj_request_put(orig_request);
2584 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2586 struct rbd_obj_request *stat_request;
2587 struct rbd_device *rbd_dev;
2588 struct ceph_osd_client *osdc;
2589 struct page **pages = NULL;
2595 * The response data for a STAT call consists of:
 *     le64 length;
 *     struct {
 *         le32 tv_sec;
 *         le32 tv_nsec;
 *     } mtime;
2602 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2603 page_count = (u32)calc_pages_for(0, size);
2604 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2606 return PTR_ERR(pages);
2609 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2614 rbd_obj_request_get(obj_request);
2615 stat_request->obj_request = obj_request;
2616 stat_request->pages = pages;
2617 stat_request->page_count = page_count;
2619 rbd_assert(obj_request->img_request);
2620 rbd_dev = obj_request->img_request->rbd_dev;
2621 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2623 if (!stat_request->osd_req)
2625 stat_request->callback = rbd_img_obj_exists_callback;
2627 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2628 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2630 rbd_osd_req_format_read(stat_request);
2632 osdc = &rbd_dev->rbd_client->client->osdc;
2633 ret = rbd_obj_request_submit(osdc, stat_request);
2636 rbd_obj_request_put(obj_request);
2641 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2643 struct rbd_img_request *img_request;
2644 struct rbd_device *rbd_dev;
2647 rbd_assert(obj_request_img_data_test(obj_request));
2649 img_request = obj_request->img_request;
2650 rbd_assert(img_request);
2651 rbd_dev = img_request->rbd_dev;
2654 * Only writes to layered images need special handling.
2655 * Reads and non-layered writes are simple object requests.
2656 * Layered writes that start beyond the end of the overlap
2657 * with the parent have no parent data, so they too are
2658 * simple object requests. Finally, if the target object is
2659 * known to already exist, its parent data has already been
2660 * copied, so a write to the object can also be handled as a
2661 * simple object request.
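 *
 * Summarizing the tests below:
 *   - read, non-layered write, write beyond the parent overlap,
 *     or write to an object known to exist: plain object request;
 *   - target known not to exist: read the parent data and do a
 *     copyup (rbd_img_obj_parent_read_full());
 *   - existence unknown: issue a STAT first
 *     (rbd_img_obj_exists_submit()).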
2663 if (!img_request_write_test(img_request) ||
2664 !img_request_layered_test(img_request) ||
2665 rbd_dev->parent_overlap <= obj_request->img_offset ||
2666 ((known = obj_request_known_test(obj_request)) &&
2667 obj_request_exists_test(obj_request))) {
2669 struct rbd_device *rbd_dev;
2670 struct ceph_osd_client *osdc;
2672 rbd_dev = obj_request->img_request->rbd_dev;
2673 osdc = &rbd_dev->rbd_client->client->osdc;
2675 return rbd_obj_request_submit(osdc, obj_request);
2679 * It's a layered write. The target object might exist but
2680 * we may not know that yet. If we know it doesn't exist,
2681 * start by reading the data for the full target object from
2682 * the parent so we can use it for a copyup to the target.
2685 return rbd_img_obj_parent_read_full(obj_request);
2687 /* We don't know whether the target exists. Go find out. */
2689 return rbd_img_obj_exists_submit(obj_request);
2692 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2694 struct rbd_obj_request *obj_request;
2695 struct rbd_obj_request *next_obj_request;
2697 dout("%s: img %p\n", __func__, img_request);
2698 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2701 ret = rbd_img_obj_request_submit(obj_request);
2709 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2711 struct rbd_obj_request *obj_request;
2712 struct rbd_device *rbd_dev;
2717 rbd_assert(img_request_child_test(img_request));
2719 /* First get what we need from the image request and release it */
2721 obj_request = img_request->obj_request;
2722 img_xferred = img_request->xferred;
2723 img_result = img_request->result;
2724 rbd_img_request_put(img_request);
2727 * If the overlap has become 0 (most likely because the
2728 * image has been flattened) we need to re-submit the original request.
2731 rbd_assert(obj_request);
2732 rbd_assert(obj_request->img_request);
2733 rbd_dev = obj_request->img_request->rbd_dev;
2734 if (!rbd_dev->parent_overlap) {
2735 struct ceph_osd_client *osdc;
2737 osdc = &rbd_dev->rbd_client->client->osdc;
2738 img_result = rbd_obj_request_submit(osdc, obj_request);
2743 obj_request->result = img_result;
2744 if (obj_request->result)
2748 * We need to zero anything beyond the parent overlap
2749 * boundary. Since rbd_img_obj_request_read_callback()
2750 * will zero anything beyond the end of a short read, an
2751 * easy way to do this is to pretend the data from the
2752 * parent came up short--ending at the overlap boundary.
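 *
 * Worked example (values assumed for illustration): with a parent
 * overlap of 4 MiB, a 2 MiB read at image offset 3 MiB can take at
 * most 4 MiB - 3 MiB = 1 MiB from the parent, so xferred is capped
 * at 1 MiB and the remainder of the buffer is zero-filled.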
2754 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2755 obj_end = obj_request->img_offset + obj_request->length;
2756 if (obj_end > rbd_dev->parent_overlap) {
2759 if (obj_request->img_offset < rbd_dev->parent_overlap)
2760 xferred = rbd_dev->parent_overlap -
2761 obj_request->img_offset;
2763 obj_request->xferred = min(img_xferred, xferred);
2765 obj_request->xferred = img_xferred;
2768 rbd_img_obj_request_read_callback(obj_request);
2769 rbd_obj_request_complete(obj_request);
2772 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2774 struct rbd_img_request *img_request;
2777 rbd_assert(obj_request_img_data_test(obj_request));
2778 rbd_assert(obj_request->img_request != NULL);
2779 rbd_assert(obj_request->result == (s32) -ENOENT);
2780 rbd_assert(obj_request_type_valid(obj_request->type));
2782 /* rbd_read_finish(obj_request, obj_request->length); */
2783 img_request = rbd_parent_request_create(obj_request,
2784 obj_request->img_offset,
2785 obj_request->length);
2790 if (obj_request->type == OBJ_REQUEST_BIO)
2791 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2792 obj_request->bio_list);
2794 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2795 obj_request->pages);
2799 img_request->callback = rbd_img_parent_read_callback;
2800 result = rbd_img_request_submit(img_request);
2807 rbd_img_request_put(img_request);
2808 obj_request->result = result;
2809 obj_request->xferred = 0;
2810 obj_request_done_set(obj_request);
2813 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2815 struct rbd_obj_request *obj_request;
2816 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2819 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2820 OBJ_REQUEST_NODATA);
2825 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2826 if (!obj_request->osd_req)
2828 obj_request->callback = rbd_obj_request_put;
2830 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2832 rbd_osd_req_format_read(obj_request);
2834 ret = rbd_obj_request_submit(osdc, obj_request);
2837 rbd_obj_request_put(obj_request);
2842 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2844 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2850 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2851 rbd_dev->header_name, (unsigned long long)notify_id,
2852 (unsigned int)opcode);
2853 ret = rbd_dev_refresh(rbd_dev);
2855 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2857 rbd_obj_notify_ack(rbd_dev, notify_id);
2861 * Request sync osd watch/unwatch. The value of "start" determines
2862 * whether a watch request is being initiated or torn down.
2864 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2866 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2867 struct rbd_obj_request *obj_request;
2870 rbd_assert(start ^ !!rbd_dev->watch_event);
2871 rbd_assert(start ^ !!rbd_dev->watch_request);
2874 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2875 &rbd_dev->watch_event);
2878 rbd_assert(rbd_dev->watch_event != NULL);
2882 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2883 OBJ_REQUEST_NODATA);
2887 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2888 if (!obj_request->osd_req)
2892 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2894 ceph_osdc_unregister_linger_request(osdc,
2895 rbd_dev->watch_request->osd_req);
2897 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2898 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2899 rbd_osd_req_format_write(obj_request);
2901 ret = rbd_obj_request_submit(osdc, obj_request);
2904 ret = rbd_obj_request_wait(obj_request);
2907 ret = obj_request->result;
2912 * A watch request is set to linger, so the underlying osd
2913 * request won't go away until we unregister it. We retain
2914 * a pointer to the object request during that time (in
2915 * rbd_dev->watch_request), so we'll keep a reference to
2916 * it. We'll drop that reference (below) after we've unregistered it.
2920 rbd_dev->watch_request = obj_request;
2925 /* We have successfully torn down the watch request */
2927 rbd_obj_request_put(rbd_dev->watch_request);
2928 rbd_dev->watch_request = NULL;
2930 /* Cancel the event if we're tearing down, or on error */
2931 ceph_osdc_cancel_event(rbd_dev->watch_event);
2932 rbd_dev->watch_event = NULL;
2934 rbd_obj_request_put(obj_request);
2940 * Synchronous osd object method call. Returns the number of bytes
2941 * returned in the inbound buffer, or a negative error code.
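 *
 * For example, the v2 metadata helpers below call this as:
 *
 *   ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *                             "rbd", "get_object_prefix", NULL, 0,
 *                             reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
 *
 * A negative return value is an error; otherwise it is the number
 * of reply bytes copied into the inbound buffer.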
2943 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2944 const char *object_name,
2945 const char *class_name,
2946 const char *method_name,
2947 const void *outbound,
2948 size_t outbound_size,
2950 size_t inbound_size)
2952 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2953 struct rbd_obj_request *obj_request;
2954 struct page **pages;
2959 * Method calls are ultimately read operations. The result
2960 * should be placed into the inbound buffer provided. They
2961 * also supply outbound data--parameters for the object
2962 * method. Currently if this is present it will be a
 * snapshot id.
2965 page_count = (u32)calc_pages_for(0, inbound_size);
2966 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2968 return PTR_ERR(pages);
2971 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2976 obj_request->pages = pages;
2977 obj_request->page_count = page_count;
2979 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2980 if (!obj_request->osd_req)
2983 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2984 class_name, method_name);
2985 if (outbound_size) {
2986 struct ceph_pagelist *pagelist;
2988 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2992 ceph_pagelist_init(pagelist);
2993 ceph_pagelist_append(pagelist, outbound, outbound_size);
2994 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2997 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2998 obj_request->pages, inbound_size,
3000 rbd_osd_req_format_read(obj_request);
3002 ret = rbd_obj_request_submit(osdc, obj_request);
3005 ret = rbd_obj_request_wait(obj_request);
3009 ret = obj_request->result;
3013 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3014 ret = (int)obj_request->xferred;
3015 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3018 rbd_obj_request_put(obj_request);
3020 ceph_release_page_vector(pages, page_count);
3025 static void rbd_request_fn(struct request_queue *q)
3026 __releases(q->queue_lock) __acquires(q->queue_lock)
3028 struct rbd_device *rbd_dev = q->queuedata;
3029 bool read_only = rbd_dev->mapping.read_only;
3033 while ((rq = blk_fetch_request(q))) {
3034 bool write_request = rq_data_dir(rq) == WRITE;
3035 struct rbd_img_request *img_request;
3039 /* Ignore any non-FS requests that filter through. */
3041 if (rq->cmd_type != REQ_TYPE_FS) {
3042 dout("%s: non-fs request type %d\n", __func__,
3043 (int) rq->cmd_type);
3044 __blk_end_request_all(rq, 0);
3048 /* Ignore/skip any zero-length requests */
3050 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3051 length = (u64) blk_rq_bytes(rq);
3054 dout("%s: zero-length request\n", __func__);
3055 __blk_end_request_all(rq, 0);
3059 spin_unlock_irq(q->queue_lock);
3061 /* Disallow writes to a read-only device */
3063 if (write_request) {
3067 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3071 * Quit early if the mapped snapshot no longer
3072 * exists. It's still possible the snapshot will
3073 * have disappeared by the time our request arrives
3074 * at the osd, but there's no sense in sending it if we already know.
3077 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3078 dout("request for non-existent snapshot");
3079 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3085 if (offset && length > U64_MAX - offset + 1) {
3086 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3088 goto end_request; /* Shouldn't happen */
3092 if (offset + length > rbd_dev->mapping.size) {
3093 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3094 offset, length, rbd_dev->mapping.size);
3099 img_request = rbd_img_request_create(rbd_dev, offset, length,
3104 img_request->rq = rq;
3106 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3109 result = rbd_img_request_submit(img_request);
3111 rbd_img_request_put(img_request);
3113 spin_lock_irq(q->queue_lock);
3115 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3116 write_request ? "write" : "read",
3117 length, offset, result);
3119 __blk_end_request_all(rq, result);
3125 * A queue callback. Makes sure that we don't create a bio that spans
3126 * multiple osd objects. One exception would be a single-page bio,
3127 * which we handle later in bio_chain_clone_range().
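 *
 * Worked example (assuming the default 4 MiB object size, i.e. an
 * object order of 22): sectors_per_obj is 8192. A bio starting
 * 8000 sectors into an object may grow by at most 192 sectors
 * (96 KiB) before it would cross into the next object.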
3129 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3130 struct bio_vec *bvec)
3132 struct rbd_device *rbd_dev = q->queuedata;
3133 sector_t sector_offset;
3134 sector_t sectors_per_obj;
3135 sector_t obj_sector_offset;
3139 * Find how far into its rbd object the partition-relative
3140 * bio start sector falls, once converted to an offset
 * relative to the enclosing device.
3143 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3144 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3145 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3148 * Compute the number of bytes from that offset to the end
3149 * of the object. Account for what's already used by the bio.
3151 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3152 if (ret > bmd->bi_size)
3153 ret -= bmd->bi_size;
3158 * Don't send back more than was asked for. And if the bio
3159 * was empty, let the whole thing through because: "Note
3160 * that a block device *must* allow a single page to be
3161 * added to an empty bio."
3163 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3164 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3165 ret = (int) bvec->bv_len;
3170 static void rbd_free_disk(struct rbd_device *rbd_dev)
3172 struct gendisk *disk = rbd_dev->disk;
3177 rbd_dev->disk = NULL;
3178 if (disk->flags & GENHD_FL_UP) {
3181 blk_cleanup_queue(disk->queue);
3186 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3187 const char *object_name,
3188 u64 offset, u64 length, void *buf)
3191 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3192 struct rbd_obj_request *obj_request;
3193 struct page **pages = NULL;
3198 page_count = (u32) calc_pages_for(offset, length);
3199 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3201 ret = PTR_ERR(pages);
3204 obj_request = rbd_obj_request_create(object_name, offset, length,
3209 obj_request->pages = pages;
3210 obj_request->page_count = page_count;
3212 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3213 if (!obj_request->osd_req)
3216 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3217 offset, length, 0, 0);
3218 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3220 obj_request->length,
3221 obj_request->offset & ~PAGE_MASK,
3223 rbd_osd_req_format_read(obj_request);
3225 ret = rbd_obj_request_submit(osdc, obj_request);
3228 ret = rbd_obj_request_wait(obj_request);
3232 ret = obj_request->result;
3236 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3237 size = (size_t) obj_request->xferred;
3238 ceph_copy_from_page_vector(pages, buf, 0, size);
3239 rbd_assert(size <= (size_t)INT_MAX);
3243 rbd_obj_request_put(obj_request);
3245 ceph_release_page_vector(pages, page_count);
3251 * Read the complete header for the given rbd device. On successful
3252 * return, the rbd_dev->header field will contain up-to-date
3253 * information about the image.
3255 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3257 struct rbd_image_header_ondisk *ondisk = NULL;
3264 * The complete header will include an array of its 64-bit
3265 * snapshot ids, followed by the names of those snapshots as
3266 * a contiguous block of NUL-terminated strings. Note that
3267 * the number of snapshots could change by the time we read
3268 * it in, in which case we re-read it.
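 *
 * Sketch of the loop below: guess a snapshot count, read a header
 * sized for that guess, and if the on-disk count differs from the
 * guess, loop and re-read using the count just returned.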
3275 size = sizeof (*ondisk);
3276 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3278 ondisk = kmalloc(size, GFP_KERNEL);
3282 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3286 if ((size_t)ret < size) {
3288 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3292 if (!rbd_dev_ondisk_valid(ondisk)) {
3294 rbd_warn(rbd_dev, "invalid header");
3298 names_size = le64_to_cpu(ondisk->snap_names_len);
3299 want_count = snap_count;
3300 snap_count = le32_to_cpu(ondisk->snap_count);
3301 } while (snap_count != want_count);
3303 ret = rbd_header_from_disk(rbd_dev, ondisk);
3311 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3312 * has disappeared from the (just updated) snapshot context.
3314 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3318 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3321 snap_id = rbd_dev->spec->snap_id;
3322 if (snap_id == CEPH_NOSNAP)
3325 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3326 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3329 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3334 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3335 mapping_size = rbd_dev->mapping.size;
3336 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3337 if (rbd_dev->image_format == 1)
3338 ret = rbd_dev_v1_header_info(rbd_dev);
3340 ret = rbd_dev_v2_header_info(rbd_dev);
3342 /* If it's a mapped snapshot, validate its EXISTS flag */
3344 rbd_exists_validate(rbd_dev);
3345 mutex_unlock(&ctl_mutex);
3346 if (mapping_size != rbd_dev->mapping.size) {
3349 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3350 dout("setting size to %llu sectors", (unsigned long long)size);
3351 set_capacity(rbd_dev->disk, size);
3352 revalidate_disk(rbd_dev->disk);
3358 static int rbd_init_disk(struct rbd_device *rbd_dev)
3360 struct gendisk *disk;
3361 struct request_queue *q;
3364 /* create gendisk info */
3365 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3369 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3371 disk->major = rbd_dev->major;
3372 disk->first_minor = 0;
3373 disk->fops = &rbd_bd_ops;
3374 disk->private_data = rbd_dev;
3376 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3380 /* We use the default size, but let's be explicit about it. */
3381 blk_queue_physical_block_size(q, SECTOR_SIZE);
3383 /* set io sizes to object size */
3384 segment_size = rbd_obj_bytes(&rbd_dev->header);
3385 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3386 blk_queue_max_segment_size(q, segment_size);
3387 blk_queue_io_min(q, segment_size);
3388 blk_queue_io_opt(q, segment_size);
3390 blk_queue_merge_bvec(q, rbd_merge_bvec);
3393 q->queuedata = rbd_dev;
3395 rbd_dev->disk = disk;
3408 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3410 return container_of(dev, struct rbd_device, dev);
3413 static ssize_t rbd_size_show(struct device *dev,
3414 struct device_attribute *attr, char *buf)
3416 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3418 return sprintf(buf, "%llu\n",
3419 (unsigned long long)rbd_dev->mapping.size);
3423 * Note this shows the features for whatever's mapped, which is not
3424 * necessarily the base image.
3426 static ssize_t rbd_features_show(struct device *dev,
3427 struct device_attribute *attr, char *buf)
3429 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3431 return sprintf(buf, "0x%016llx\n",
3432 (unsigned long long)rbd_dev->mapping.features);
3435 static ssize_t rbd_major_show(struct device *dev,
3436 struct device_attribute *attr, char *buf)
3438 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3441 return sprintf(buf, "%d\n", rbd_dev->major);
3443 return sprintf(buf, "(none)\n");
3447 static ssize_t rbd_client_id_show(struct device *dev,
3448 struct device_attribute *attr, char *buf)
3450 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3452 return sprintf(buf, "client%lld\n",
3453 ceph_client_id(rbd_dev->rbd_client->client));
3456 static ssize_t rbd_pool_show(struct device *dev,
3457 struct device_attribute *attr, char *buf)
3459 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3461 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3464 static ssize_t rbd_pool_id_show(struct device *dev,
3465 struct device_attribute *attr, char *buf)
3467 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3469 return sprintf(buf, "%llu\n",
3470 (unsigned long long) rbd_dev->spec->pool_id);
3473 static ssize_t rbd_name_show(struct device *dev,
3474 struct device_attribute *attr, char *buf)
3476 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3478 if (rbd_dev->spec->image_name)
3479 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3481 return sprintf(buf, "(unknown)\n");
3484 static ssize_t rbd_image_id_show(struct device *dev,
3485 struct device_attribute *attr, char *buf)
3487 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3489 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3493 * Shows the name of the currently-mapped snapshot (or
3494 * RBD_SNAP_HEAD_NAME for the base image).
3496 static ssize_t rbd_snap_show(struct device *dev,
3497 struct device_attribute *attr,
3500 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3502 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3506 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3507 * for the parent image. If there is no parent, simply shows
3508 * "(no parent image)".
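 *
 * Example output (hypothetical values, following the formats used
 * below):
 *   pool_id 2
 *   pool_name rbd
 *   image_id 10056b8b4567
 *   image_name parent-image
 *   snap_id 4
 *   snap_name snap1
 *   overlap 4194304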
3510 static ssize_t rbd_parent_show(struct device *dev,
3511 struct device_attribute *attr,
3514 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3515 struct rbd_spec *spec = rbd_dev->parent_spec;
3520 return sprintf(buf, "(no parent image)\n");
3522 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3523 (unsigned long long) spec->pool_id, spec->pool_name);
3528 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3529 spec->image_name ? spec->image_name : "(unknown)");
3534 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3535 (unsigned long long) spec->snap_id, spec->snap_name);
3540 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3545 return (ssize_t) (bufp - buf);
3548 static ssize_t rbd_image_refresh(struct device *dev,
3549 struct device_attribute *attr,
3553 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3556 ret = rbd_dev_refresh(rbd_dev);
3558 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3560 return ret < 0 ? ret : size;
3563 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3564 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3565 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3566 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3567 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3568 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3569 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3570 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3571 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3572 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3573 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3575 static struct attribute *rbd_attrs[] = {
3576 &dev_attr_size.attr,
3577 &dev_attr_features.attr,
3578 &dev_attr_major.attr,
3579 &dev_attr_client_id.attr,
3580 &dev_attr_pool.attr,
3581 &dev_attr_pool_id.attr,
3582 &dev_attr_name.attr,
3583 &dev_attr_image_id.attr,
3584 &dev_attr_current_snap.attr,
3585 &dev_attr_parent.attr,
3586 &dev_attr_refresh.attr,
3590 static struct attribute_group rbd_attr_group = {
3594 static const struct attribute_group *rbd_attr_groups[] = {
3599 static void rbd_sysfs_dev_release(struct device *dev)
3603 static struct device_type rbd_device_type = {
3605 .groups = rbd_attr_groups,
3606 .release = rbd_sysfs_dev_release,
3609 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3611 kref_get(&spec->kref);
3616 static void rbd_spec_free(struct kref *kref);
3617 static void rbd_spec_put(struct rbd_spec *spec)
3620 kref_put(&spec->kref, rbd_spec_free);
3623 static struct rbd_spec *rbd_spec_alloc(void)
3625 struct rbd_spec *spec;
3627 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3630 kref_init(&spec->kref);
3635 static void rbd_spec_free(struct kref *kref)
3637 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3639 kfree(spec->pool_name);
3640 kfree(spec->image_id);
3641 kfree(spec->image_name);
3642 kfree(spec->snap_name);
3646 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3647 struct rbd_spec *spec)
3649 struct rbd_device *rbd_dev;
3651 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3655 spin_lock_init(&rbd_dev->lock);
3657 atomic_set(&rbd_dev->parent_ref, 0);
3658 INIT_LIST_HEAD(&rbd_dev->node);
3659 init_rwsem(&rbd_dev->header_rwsem);
3661 rbd_dev->spec = spec;
3662 rbd_dev->rbd_client = rbdc;
3664 /* Initialize the layout used for all rbd requests */
3666 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3667 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3668 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3669 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3674 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3676 rbd_put_client(rbd_dev->rbd_client);
3677 rbd_spec_put(rbd_dev->spec);
3682 * Get the size and object order for an image snapshot, or if
3683 * snap_id is CEPH_NOSNAP, gets this information for the base image.
3686 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3687 u8 *order, u64 *snap_size)
3689 __le64 snapid = cpu_to_le64(snap_id);
3694 } __attribute__ ((packed)) size_buf = { 0 };
3696 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3698 &snapid, sizeof (snapid),
3699 &size_buf, sizeof (size_buf));
3700 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3703 if (ret < sizeof (size_buf))
3707 *order = size_buf.order;
3708 *snap_size = le64_to_cpu(size_buf.size);
3710 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3711 (unsigned long long)snap_id, (unsigned int)*order,
3712 (unsigned long long)*snap_size);
3717 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3719 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3720 &rbd_dev->header.obj_order,
3721 &rbd_dev->header.image_size);
3724 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3730 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3734 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3735 "rbd", "get_object_prefix", NULL, 0,
3736 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3737 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3742 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3743 p + ret, NULL, GFP_NOIO);
3746 if (IS_ERR(rbd_dev->header.object_prefix)) {
3747 ret = PTR_ERR(rbd_dev->header.object_prefix);
3748 rbd_dev->header.object_prefix = NULL;
3750 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3758 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3761 __le64 snapid = cpu_to_le64(snap_id);
3765 } __attribute__ ((packed)) features_buf = { 0 };
3769 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3770 "rbd", "get_features",
3771 &snapid, sizeof (snapid),
3772 &features_buf, sizeof (features_buf));
3773 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3776 if (ret < sizeof (features_buf))
3779 incompat = le64_to_cpu(features_buf.incompat);
3780 if (incompat & ~RBD_FEATURES_SUPPORTED)
3783 *snap_features = le64_to_cpu(features_buf.features);
3785 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3786 (unsigned long long)snap_id,
3787 (unsigned long long)*snap_features,
3788 (unsigned long long)le64_to_cpu(features_buf.incompat));
3793 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3795 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3796 &rbd_dev->header.features);
3799 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3801 struct rbd_spec *parent_spec;
3803 void *reply_buf = NULL;
3812 parent_spec = rbd_spec_alloc();
3816 size = sizeof (__le64) + /* pool_id */
3817 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3818 sizeof (__le64) + /* snap_id */
3819 sizeof (__le64); /* overlap */
3820 reply_buf = kmalloc(size, GFP_KERNEL);
3826 snapid = cpu_to_le64(CEPH_NOSNAP);
3827 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3828 "rbd", "get_parent",
3829 &snapid, sizeof (snapid),
3831 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3836 end = reply_buf + ret;
3838 ceph_decode_64_safe(&p, end, pool_id, out_err);
3839 if (pool_id == CEPH_NOPOOL) {
3841 * Either the parent never existed, or we have
3842 * record of it but the image got flattened so it no
3843 * longer has a parent. When the parent of a
3844 * layered image disappears we immediately set the
3845 * overlap to 0. The effect of this is that all new
3846 * requests will be treated as if the image had no parent.
3849 if (rbd_dev->parent_overlap) {
3850 rbd_dev->parent_overlap = 0;
3852 rbd_dev_parent_put(rbd_dev);
3853 pr_info("%s: clone image has been flattened\n",
3854 rbd_dev->disk->disk_name);
3857 goto out; /* No parent? No problem. */
3860 /* The ceph file layout needs to fit pool id in 32 bits */
3863 if (pool_id > (u64)U32_MAX) {
3864 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3865 (unsigned long long)pool_id, U32_MAX);
3868 parent_spec->pool_id = pool_id;
3870 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3871 if (IS_ERR(image_id)) {
3872 ret = PTR_ERR(image_id);
3875 parent_spec->image_id = image_id;
3876 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3877 ceph_decode_64_safe(&p, end, overlap, out_err);
3880 rbd_spec_put(rbd_dev->parent_spec);
3881 rbd_dev->parent_spec = parent_spec;
3882 parent_spec = NULL; /* rbd_dev now owns this */
3883 rbd_dev->parent_overlap = overlap;
3885 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3891 rbd_spec_put(parent_spec);
3896 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3900 __le64 stripe_count;
3901 } __attribute__ ((packed)) striping_info_buf = { 0 };
3902 size_t size = sizeof (striping_info_buf);
3909 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3910 "rbd", "get_stripe_unit_count", NULL, 0,
3911 (char *)&striping_info_buf, size);
3912 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3919 * We don't actually support the "fancy striping" feature
3920 * (STRIPINGV2) yet, but if the striping sizes are the
3921 * defaults the behavior is the same as before. So find
3922 * out, and only fail if the image has non-default values.
3925 obj_size = (u64)1 << rbd_dev->header.obj_order;
3926 p = &striping_info_buf;
3927 stripe_unit = ceph_decode_64(&p);
3928 if (stripe_unit != obj_size) {
3929 rbd_warn(rbd_dev, "unsupported stripe unit "
3930 "(got %llu want %llu)",
3931 stripe_unit, obj_size);
3934 stripe_count = ceph_decode_64(&p);
3935 if (stripe_count != 1) {
3936 rbd_warn(rbd_dev, "unsupported stripe count "
3937 "(got %llu want 1)", stripe_count);
3940 rbd_dev->header.stripe_unit = stripe_unit;
3941 rbd_dev->header.stripe_count = stripe_count;
3946 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3948 size_t image_id_size;
3953 void *reply_buf = NULL;
3955 char *image_name = NULL;
3958 rbd_assert(!rbd_dev->spec->image_name);
3960 len = strlen(rbd_dev->spec->image_id);
3961 image_id_size = sizeof (__le32) + len;
3962 image_id = kmalloc(image_id_size, GFP_KERNEL);
3967 end = image_id + image_id_size;
3968 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3970 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3971 reply_buf = kmalloc(size, GFP_KERNEL);
3975 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3976 "rbd", "dir_get_name",
3977 image_id, image_id_size,
3982 end = reply_buf + ret;
3984 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3985 if (IS_ERR(image_name))
3988 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3996 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3998 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3999 const char *snap_name;
4002 /* Skip over names until we find the one we are looking for */
4004 snap_name = rbd_dev->header.snap_names;
4005 while (which < snapc->num_snaps) {
4006 if (!strcmp(name, snap_name))
4007 return snapc->snaps[which];
4008 snap_name += strlen(snap_name) + 1;
4014 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4016 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4021 for (which = 0; !found && which < snapc->num_snaps; which++) {
4022 const char *snap_name;
4024 snap_id = snapc->snaps[which];
4025 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4026 if (IS_ERR(snap_name))
4028 found = !strcmp(name, snap_name);
4031 return found ? snap_id : CEPH_NOSNAP;
4035 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4036 * no snapshot by that name is found, or if an error occurs.
4038 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4040 if (rbd_dev->image_format == 1)
4041 return rbd_v1_snap_id_by_name(rbd_dev, name);
4043 return rbd_v2_snap_id_by_name(rbd_dev, name);
4047 * When an rbd image has a parent image, it is identified by the
4048 * pool, image, and snapshot ids (not names). This function fills
4049 * in the names for those ids. (It's OK if we can't figure out the
4050 * name for an image id, but the pool and snapshot ids should always
4051 * exist and have names.) All names in an rbd spec are dynamically allocated.
4054 * When an image being mapped (not a parent) is probed, we have the
4055 * pool name and pool id, image name and image id, and the snapshot
4056 * name. The only thing we're missing is the snapshot id.
4058 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4060 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4061 struct rbd_spec *spec = rbd_dev->spec;
4062 const char *pool_name;
4063 const char *image_name;
4064 const char *snap_name;
4068 * An image being mapped will have the pool name (etc.), but
4069 * we need to look up the snapshot id.
4071 if (spec->pool_name) {
4072 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4075 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4076 if (snap_id == CEPH_NOSNAP)
4078 spec->snap_id = snap_id;
4080 spec->snap_id = CEPH_NOSNAP;
4086 /* Get the pool name; we have to make our own copy of this */
4088 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4090 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4093 pool_name = kstrdup(pool_name, GFP_KERNEL);
4097 /* Fetch the image name; tolerate failure here */
4099 image_name = rbd_dev_image_name(rbd_dev);
4101 rbd_warn(rbd_dev, "unable to get image name");
4103 /* Look up the snapshot name, and make a copy */
4105 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4111 spec->pool_name = pool_name;
4112 spec->image_name = image_name;
4113 spec->snap_name = snap_name;
4123 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4132 struct ceph_snap_context *snapc;
4136 * We'll need room for the seq value (maximum snapshot id),
4137 * snapshot count, and array of that many snapshot ids.
4138 * For now we have a fixed upper limit on the number we're
4139 * prepared to receive.
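 *
 * With RBD_MAX_SNAP_COUNT of 510, the buffer allocated below is
 * 8 + 4 + 510 * 8 = 4092 bytes, which fits in a single 4 KiB page.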
4141 size = sizeof (__le64) + sizeof (__le32) +
4142 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4143 reply_buf = kzalloc(size, GFP_KERNEL);
4147 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4148 "rbd", "get_snapcontext", NULL, 0,
4150 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4155 end = reply_buf + ret;
4157 ceph_decode_64_safe(&p, end, seq, out);
4158 ceph_decode_32_safe(&p, end, snap_count, out);
4161 * Make sure the reported number of snapshot ids wouldn't go
4162 * beyond the end of our buffer. But before checking that,
4163 * make sure the computed size of the snapshot context we
4164 * allocate is representable in a size_t.
4166 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4171 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4175 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4181 for (i = 0; i < snap_count; i++)
4182 snapc->snaps[i] = ceph_decode_64(&p);
4184 ceph_put_snap_context(rbd_dev->header.snapc);
4185 rbd_dev->header.snapc = snapc;
4187 dout(" snap context seq = %llu, snap_count = %u\n",
4188 (unsigned long long)seq, (unsigned int)snap_count);
4195 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4206 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4207 reply_buf = kmalloc(size, GFP_KERNEL);
4209 return ERR_PTR(-ENOMEM);
4211 snapid = cpu_to_le64(snap_id);
4212 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4213 "rbd", "get_snapshot_name",
4214 &snapid, sizeof (snapid),
4216 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4218 snap_name = ERR_PTR(ret);
4223 end = reply_buf + ret;
4224 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4225 if (IS_ERR(snap_name))
4228 dout(" snap_id 0x%016llx snap_name = %s\n",
4229 (unsigned long long)snap_id, snap_name);
4236 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4238 bool first_time = rbd_dev->header.object_prefix == NULL;
4241 down_write(&rbd_dev->header_rwsem);
4244 ret = rbd_dev_v2_header_onetime(rbd_dev);
4250 * If the image supports layering, get the parent info. We
4251 * need to probe the first time regardless. Thereafter we
4252 * only need to do so if there's a parent, to see if it has
4253 * disappeared due to the mapped image getting flattened.
4255 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4256 (first_time || rbd_dev->parent_spec)) {
4259 ret = rbd_dev_v2_parent_info(rbd_dev);
4264 * Print a warning if this is the initial probe and
4265 * the image has a parent. Don't print it if the
4266 * image now being probed is itself a parent. We
4267 * can tell at this point because we won't know its
4268 * pool name yet (just its pool id).
4270 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4271 if (first_time && warn)
4272 rbd_warn(rbd_dev, "WARNING: kernel layering "
4273 "is EXPERIMENTAL!");
4276 ret = rbd_dev_v2_image_size(rbd_dev);
4280 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4281 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4282 rbd_dev->mapping.size = rbd_dev->header.image_size;
4284 ret = rbd_dev_v2_snap_context(rbd_dev);
4285 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4287 up_write(&rbd_dev->header_rwsem);
4292 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4297 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4299 dev = &rbd_dev->dev;
4300 dev->bus = &rbd_bus_type;
4301 dev->type = &rbd_device_type;
4302 dev->parent = &rbd_root_dev;
4303 dev->release = rbd_dev_device_release;
4304 dev_set_name(dev, "%d", rbd_dev->dev_id);
4305 ret = device_register(dev);
4307 mutex_unlock(&ctl_mutex);
4312 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4314 device_unregister(&rbd_dev->dev);
4317 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4320 * Get a unique rbd identifier for the given new rbd_dev, and add
4321 * the rbd_dev to the global list. The minimum rbd id is 1.
4323 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4325 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4327 spin_lock(&rbd_dev_list_lock);
4328 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4329 spin_unlock(&rbd_dev_list_lock);
4330 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4331 (unsigned long long) rbd_dev->dev_id);
4335 * Remove an rbd_dev from the global list, and record that its
4336 * identifier is no longer in use.
4338 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4340 struct list_head *tmp;
4341 int rbd_id = rbd_dev->dev_id;
4344 rbd_assert(rbd_id > 0);
4346 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4347 (unsigned long long) rbd_dev->dev_id);
4348 spin_lock(&rbd_dev_list_lock);
4349 list_del_init(&rbd_dev->node);
4352 * If the id being "put" is not the current maximum, there
4353 * is nothing special we need to do.
4355 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4356 spin_unlock(&rbd_dev_list_lock);
4361 * We need to update the current maximum id. Search the
4362 * list to find out what it is. We're more likely to find
4363 * the maximum at the end, so search the list backward.
4366 list_for_each_prev(tmp, &rbd_dev_list) {
4367 struct rbd_device *rbd_dev;
4369 rbd_dev = list_entry(tmp, struct rbd_device, node);
4370 if (rbd_dev->dev_id > max_id)
4371 max_id = rbd_dev->dev_id;
4373 spin_unlock(&rbd_dev_list_lock);
4376 * The max id could have been updated by rbd_dev_id_get(), in
4377 * which case it now accurately reflects the new maximum.
4378 * Be careful not to overwrite the maximum value in that case.
4381 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4382 dout(" max dev id has been reset\n");
4386 * Skips over white space at *buf, and updates *buf to point to the
4387 * first found non-space character (if any). Returns the length of
4388 * the token (string of non-white space characters) found. Note
4389 * that *buf must be terminated with '\0'.
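 *
 * For example (hypothetical input): given *buf pointing at
 * "  pool image", next_token() advances *buf to "pool image"
 * and returns 4, the length of "pool".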
4391 static inline size_t next_token(const char **buf)
4394 * These are the characters that produce nonzero for
4395 * isspace() in the "C" and "POSIX" locales.
4397 const char *spaces = " \f\n\r\t\v";
4399 *buf += strspn(*buf, spaces); /* Find start of token */
4401 return strcspn(*buf, spaces); /* Return token length */
4405 * Finds the next token in *buf, and if the provided token buffer is
4406 * big enough, copies the found token into it. The result, if
4407 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4408 * must be terminated with '\0' on entry.
4410 * Returns the length of the token found (not including the '\0').
4411 * Return value will be 0 if no token is found, and it will be >=
4412 * token_size if the token would not fit.
4414 * The *buf pointer will be updated to point beyond the end of the
4415 * found token. Note that this occurs even if the token buffer is
4416 * too small to hold it.
4418 static inline size_t copy_token(const char **buf,
4424 len = next_token(buf);
4425 if (len < token_size) {
4426 memcpy(token, *buf, len);
4427 *(token + len) = '\0';
4435 * Finds the next token in *buf, dynamically allocates a buffer big
4436 * enough to hold a copy of it, and copies the token into the new
4437 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4438 * that a duplicate buffer is created even for a zero-length token.
4440 * Returns a pointer to the newly-allocated duplicate, or a null
4441 * pointer if memory for the duplicate was not available. If
4442 * the lenp argument is a non-null pointer, the length of the token
4443 * (not including the '\0') is returned in *lenp.
4445 * If successful, the *buf pointer will be updated to point beyond
4446 * the end of the found token.
4448 * Note: uses GFP_KERNEL for allocation.
4450 static inline char *dup_token(const char **buf, size_t *lenp)
4455 len = next_token(buf);
4456 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4459 *(dup + len) = '\0';
4469 * Parse the options provided for an "rbd add" (i.e., rbd image
4470 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4471 * and the data written is passed here via a NUL-terminated buffer.
4472 * Returns 0 if successful or an error code otherwise.
4474 * The information extracted from these options is recorded in
4475 * the other parameters which return dynamically-allocated structures:
4478 * The address of a pointer that will refer to a ceph options
4479 * structure. Caller must release the returned pointer using
4480 * ceph_destroy_options() when it is no longer needed.
4482 * Address of an rbd options pointer. Fully initialized by
4483 * this function; caller must release with kfree().
4485 * Address of an rbd image specification pointer. Fully
4486 * initialized by this function based on parsed options.
4487 * Caller must release with rbd_spec_put().
4489 * The options passed take this form:
4490 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4493 * A comma-separated list of one or more monitor addresses.
4494 * A monitor address is an ip address, optionally followed
4495 * by a port number (separated by a colon).
4496 * I.e.: ip1[:port1][,ip2[:port2]...]
4498 * A comma-separated list of ceph and/or rbd options.
4500 * The name of the rados pool containing the rbd image.
4502 * The name of the image in that pool to map.
4504 * An optional snapshot name. If provided, the mapping will
4505 * present data from the image at the time that snapshot was
4506 * created. The image head is used if no snapshot name is
4507 * provided. Snapshot mappings are always read-only.
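 *
 * A hypothetical example of the buffer written to /sys/bus/rbd/add
 * (monitor address, options, pool, and image; no snapshot name, so
 * the image head is mapped):
 *
 *   1.2.3.4:6789 name=admin,secret=<key> rbd myimage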
4509 static int rbd_add_parse_args(const char *buf,
4510 struct ceph_options **ceph_opts,
4511 struct rbd_options **opts,
4512 struct rbd_spec **rbd_spec)
4516 const char *mon_addrs;
4518 size_t mon_addrs_size;
4519 struct rbd_spec *spec = NULL;
4520 struct rbd_options *rbd_opts = NULL;
4521 struct ceph_options *copts;
4524 /* The first four tokens are required */
4526 len = next_token(&buf);
4528 rbd_warn(NULL, "no monitor address(es) provided");
4532 mon_addrs_size = len + 1;
4536 options = dup_token(&buf, NULL);
4540 rbd_warn(NULL, "no options provided");
4544 spec = rbd_spec_alloc();
4548 spec->pool_name = dup_token(&buf, NULL);
4549 if (!spec->pool_name)
4551 if (!*spec->pool_name) {
4552 rbd_warn(NULL, "no pool name provided");
4556 spec->image_name = dup_token(&buf, NULL);
4557 if (!spec->image_name)
4559 if (!*spec->image_name) {
4560 rbd_warn(NULL, "no image name provided");
4565 * Snapshot name is optional; default is to use "-"
4566 * (indicating the head/no snapshot).
4568 len = next_token(&buf);
4570 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4571 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4572 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4573 ret = -ENAMETOOLONG;
4576 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4579 *(snap_name + len) = '\0';
4580 spec->snap_name = snap_name;
4582 /* Initialize all rbd options to the defaults */
4584 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4588 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4590 copts = ceph_parse_options(options, mon_addrs,
4591 mon_addrs + mon_addrs_size - 1,
4592 parse_rbd_opts_token, rbd_opts);
4593 if (IS_ERR(copts)) {
4594 ret = PTR_ERR(copts);
4615 * An rbd format 2 image has a unique identifier, distinct from the
4616 * name given to it by the user. Internally, that identifier is
4617 * what's used to specify the names of objects related to the image.
4619 * A special "rbd id" object is used to map an rbd image name to its
4620 * id. If that object doesn't exist, then there is no v2 rbd image
4621 * with the supplied name.
4623 * This function will record the given rbd_dev's image_id field if
4624 * it can be determined, and in that case will return 0. If any
4625 * errors occur a negative errno will be returned and the rbd_dev's
4626 * image_id field will be unchanged (and should be NULL).
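 *
 * For example (image name assumed for illustration): for an image
 * named "myimage", the id object looked up below would be
 * "rbd_id.myimage" (RBD_ID_PREFIX followed by the image name).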
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */
	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */
	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);
	return ret;
}
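
/*
 * Example (illustrative, assuming RBD_ID_PREFIX is "rbd_id." as
 * defined in rbd_types.h): probing an image named "foo" reads the
 * object "rbd_id.foo" via the "rbd" class method "get_id".  If that
 * object exists, its encoded string payload (e.g. "1a2b3c4d5e6f")
 * becomes spec->image_id and the image is treated as format 2; an
 * -ENOENT result instead yields an empty image_id and format 1.
 */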
/* Undo whatever state changes are made by v1 or v2 image probe */

static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */
	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */
	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */
	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}
	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
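
	/*
	 * Example: dev_id 0 yields the name "rbd0"; udev typically
	 * exposes the mapped disk to userspace as /dev/rbd0 (and its
	 * partitions, if any, as /dev/rbd0p1, ...).
	 */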
	/* Get our block major device number. */
	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */
	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);
	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}
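
/*
 * Example (illustrative, assuming RBD_SUFFIX is ".rbd" and
 * RBD_HEADER_PREFIX is "rbd_header." as defined in rbd_types.h):
 * a format 1 image named "foo" gets header object "foo.rbd", while
 * a format 2 image with id "1a2b3c4d5e6f" gets header object
 * "rbd_header.1a2b3c4d5e6f".
 */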
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev, true);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping) {
		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
		if (tmp)
			rbd_warn(rbd_dev, "unable to tear down "
					"watch request (%d)\n", tmp);
	}
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);
	return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */
	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	ceph_destroy_options(ceph_opts);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);
	return (ssize_t)rc;
}
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
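
/*
 * Illustration of the teardown order above: for a layered chain
 * rbd0 -> parent -> grandparent, the inner loop walks "second" down
 * to the grandparent, which is released first; the outer loop then
 * repeats until rbd0 itself is left with no parent.
 */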
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;
	rbd_bus_del_dev(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
	ret = count;
done:
	mutex_unlock(&ctl_mutex);
	return ret;
}
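
/*
 * Example (a sketch; see Documentation/ABI/testing/sysfs-bus-rbd):
 * a mapped device with dev_id 1 would be unmapped with
 *
 *	$ echo 1 > /sys/bus/rbd/remove
 *
 * which fails with -EBUSY while the block device is still open.
 */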
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");