/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
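
/*
 * Illustrative sketch (not part of the driver): converting between
 * byte counts and 512-byte sector counts with the symbols above.
 *
 *	u64 nr_sectors = byte_count >> SECTOR_SHIFT;
 *	u64 byte_count = nr_sectors << SECTOR_SHIFT;
 */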
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);
	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);
	return -EINVAL;
}
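
/*
 * Illustrative usage sketch (not part of the driver): these helpers
 * let a counter saturate rather than wrap, so a caller can detect
 * overflow and underflow directly:
 *
 *	if (atomic_inc_return_safe(&refs) < 0)
 *		return -EINVAL;		(counter pinned at INT_MAX)
 *	...
 *	if (atomic_dec_return_safe(&refs) < 0)
 *		pr_warn("refcount underflow\n");
 */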
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
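
/*
 * Worked example: with NAME_MAX of 255 and the 5-character "snap_"
 * prefix, RBD_MAX_SNAP_NAME_LEN comes out to 250 bytes.
 */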
#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
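
/*
 * Worked example: with a 4-byte int, MAX_INT_FORMAT_WIDTH is
 * (5 * 4) / 2 + 1 = 11, exactly the width of the widest decimal
 * rendering of an int ("-2147483648" is 11 characters).
 */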
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * An instance of the client.  Multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
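
/*
 * Illustrative sketch (not part of the driver): these bit numbers
 * are used with the atomic bitops on obj_request->flags, so a
 * typical "set once, warn on repeat" update looks like:
 *
 *	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags))
 *		rbd_warn(NULL, "request already done");
 */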
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
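
/*
 * Illustrative sketch (not part of the driver): walking an image
 * request's object requests with the macros above.
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		dout("obj %p which %u\n", obj_request, obj_request->which);
 */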
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt if we cannot figure out what to print */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
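
/*
 * Illustrative sketch (not part of the driver): rbd_assert() is used
 * for invariants that must hold, e.g.:
 *
 *	rbd_assert(which < img_request->obj_request_count);
 *
 * With RBD_DEBUG unset it compiles away to ((void) 0).
 */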
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
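
/*
 * Illustrative example (not from the source): given the token table
 * above, an option string of "ro" (or "read_only") sets
 * rbd_opts->read_only to true, and "rw" (or "read_write") resets it
 * to false.  An unrecognized token makes match_token() return the
 * {-1, NULL} sentinel, and the parse fails with -EINVAL.
 */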
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to remove
 * the client from the client list, so callers must not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
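
/*
 * Illustrative example (not from the source): with a snapshot
 * context of { 12, 7, 3 } (descending, as the osd keeps it),
 * rbd_dev_snap_index() maps id 7 to index 1, while an id that is
 * not present (say 5) yields BAD_SNAP_INDEX.
 */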
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
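
/*
 * Worked example (not from the source): with the common default
 * object order of 22, objects are 1 << 22 = 4 MiB.  An image offset
 * of 16 MiB then lands in segment 4 (16 MiB >> 22) at offset 0
 * within that object, and rbd_segment_length() clamps every request
 * so it never crosses a 4 MiB object boundary.
 */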
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				flush_dcache_page(bv->bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_iter.bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_iter.bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_iter.bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	bio_for_each_segment(bv, bio_src, idx) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector +
		(offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
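
/*
 * Illustrative sketch (not part of the driver): a layered I/O path
 * pairs these helpers around any use of rbd_dev->parent, e.g.:
 *
 *	if (rbd_dev_parent_get(rbd_dev)) {
 *		... issue requests against rbd_dev->parent ...
 *		rbd_dev_parent_put(rbd_dev);
 *	}
 */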
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
2157 * Split up an image request into one or more object requests, each
2158 * to a different object. The "type" parameter indicates whether
2159 * "data_desc" is the pointer to the head of a list of bio
2160 * structures, or the base of a page array. In either case this
2161 * function assumes data_desc describes memory sufficient to hold
2162 * all data described by the image request.
2164 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2165 enum obj_request_type type,
2168 struct rbd_device *rbd_dev = img_request->rbd_dev;
2169 struct rbd_obj_request *obj_request = NULL;
2170 struct rbd_obj_request *next_obj_request;
2171 bool write_request = img_request_write_test(img_request);
2172 struct bio *bio_list = NULL;
2173 unsigned int bio_offset = 0;
2174 struct page **pages = NULL;
2179 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2180 (int)type, data_desc);
2182 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2183 img_offset = img_request->offset;
2184 resid = img_request->length;
2185 rbd_assert(resid > 0);
2187 if (type == OBJ_REQUEST_BIO) {
2188 bio_list = data_desc;
2189 rbd_assert(img_offset ==
2190 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2192 rbd_assert(type == OBJ_REQUEST_PAGES);
2197 struct ceph_osd_request *osd_req;
2198 const char *object_name;
2202 object_name = rbd_segment_name(rbd_dev, img_offset);
2205 offset = rbd_segment_offset(rbd_dev, img_offset);
2206 length = rbd_segment_length(rbd_dev, img_offset, resid);
2207 obj_request = rbd_obj_request_create(object_name,
2208 offset, length, type);
2209 /* object request has its own copy of the object name */
2210 rbd_segment_name_free(object_name);
2214 * set obj_request->img_request before creating the
2215 * osd_request so that it gets the right snapc
2217 rbd_img_obj_request_add(img_request, obj_request);
2219 if (type == OBJ_REQUEST_BIO) {
2220 unsigned int clone_size;
2222 rbd_assert(length <= (u64)UINT_MAX);
2223 clone_size = (unsigned int)length;
2224 obj_request->bio_list =
2225 bio_chain_clone_range(&bio_list,
2229 if (!obj_request->bio_list)
2232 unsigned int page_count;
2234 obj_request->pages = pages;
2235 page_count = (u32)calc_pages_for(offset, length);
2236 obj_request->page_count = page_count;
2237 if ((offset + length) & ~PAGE_MASK)
2238 page_count--; /* more on last page */
2239 pages += page_count;
2242 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2246 obj_request->osd_req = osd_req;
2247 obj_request->callback = rbd_img_obj_callback;
2249 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2251 if (type == OBJ_REQUEST_BIO)
2252 osd_req_op_extent_osd_data_bio(osd_req, 0,
2253 obj_request->bio_list, length);
2255 osd_req_op_extent_osd_data_pages(osd_req, 0,
2256 obj_request->pages, length,
2257 offset & ~PAGE_MASK, false, false);
2260 rbd_osd_req_format_write(obj_request);
2262 rbd_osd_req_format_read(obj_request);
2264 obj_request->img_offset = img_offset;
2266 img_offset += length;
2273 rbd_obj_request_put(obj_request);
2275 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2276 rbd_obj_request_put(obj_request);
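/*
 * Sketch of the per-object arithmetic rbd_img_request_fill() relies on,
 * assuming objects are (1 << obj_order) bytes; the helper names here are
 * hypothetical. An image offset maps to an offset within one object, and
 * each object request covers at most the remainder of that object.
 */
static inline u64 example_obj_offset(u64 img_offset, u8 obj_order)
{
	return img_offset & (((u64)1 << obj_order) - 1);
}

static inline u64 example_obj_length(u64 img_offset, u64 resid, u8 obj_order)
{
	u64 room = ((u64)1 << obj_order) -
		   example_obj_offset(img_offset, obj_order);

	return min(resid, room);	/* never cross an object boundary */
}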
2282 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2284 struct rbd_img_request *img_request;
2285 struct rbd_device *rbd_dev;
2286 struct page **pages;
2289 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2290 rbd_assert(obj_request_img_data_test(obj_request));
2291 img_request = obj_request->img_request;
2292 rbd_assert(img_request);
2294 rbd_dev = img_request->rbd_dev;
2295 rbd_assert(rbd_dev);
2297 pages = obj_request->copyup_pages;
2298 rbd_assert(pages != NULL);
2299 obj_request->copyup_pages = NULL;
2300 page_count = obj_request->copyup_page_count;
2301 rbd_assert(page_count);
2302 obj_request->copyup_page_count = 0;
2303 ceph_release_page_vector(pages, page_count);
2306 * We want the transfer count to reflect the size of the
2307 * original write request. There is no such thing as a
2308 * successful short write, so if the request was successful
2309 * we can just set it to the originally-requested length.
2311 if (!obj_request->result)
2312 obj_request->xferred = obj_request->length;
2314 /* Finish up with the normal image object callback */
2316 rbd_img_obj_callback(obj_request);
2320 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2322 struct rbd_obj_request *orig_request;
2323 struct ceph_osd_request *osd_req;
2324 struct ceph_osd_client *osdc;
2325 struct rbd_device *rbd_dev;
2326 struct page **pages;
2333 rbd_assert(img_request_child_test(img_request));
2335 /* First get what we need from the image request */
2337 pages = img_request->copyup_pages;
2338 rbd_assert(pages != NULL);
2339 img_request->copyup_pages = NULL;
2340 page_count = img_request->copyup_page_count;
2341 rbd_assert(page_count);
2342 img_request->copyup_page_count = 0;
2344 orig_request = img_request->obj_request;
2345 rbd_assert(orig_request != NULL);
2346 rbd_assert(obj_request_type_valid(orig_request->type));
2347 img_result = img_request->result;
2348 parent_length = img_request->length;
2349 rbd_assert(parent_length == img_request->xferred);
2350 rbd_img_request_put(img_request);
2352 rbd_assert(orig_request->img_request);
2353 rbd_dev = orig_request->img_request->rbd_dev;
2354 rbd_assert(rbd_dev);
2357 * If the overlap has become 0 (most likely because the
2358 * image has been flattened) we need to free the pages
2359 * and re-submit the original write request.
2361 if (!rbd_dev->parent_overlap) {
2362 struct ceph_osd_client *osdc;
2364 ceph_release_page_vector(pages, page_count);
2365 osdc = &rbd_dev->rbd_client->client->osdc;
2366 img_result = rbd_obj_request_submit(osdc, orig_request);
2375 * The original osd request is of no use to us any more.
2376 * We need a new one that can hold the two ops in a copyup
2377 * request. Allocate the new copyup osd request for the
2378 * original request, and release the old one.
2380 img_result = -ENOMEM;
2381 osd_req = rbd_osd_req_create_copyup(orig_request);
2384 rbd_osd_req_destroy(orig_request->osd_req);
2385 orig_request->osd_req = osd_req;
2386 orig_request->copyup_pages = pages;
2387 orig_request->copyup_page_count = page_count;
2389 /* Initialize the copyup op */
2391 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2392 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2395 /* Then the original write request op */
2397 offset = orig_request->offset;
2398 length = orig_request->length;
2399 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2400 offset, length, 0, 0);
2401 if (orig_request->type == OBJ_REQUEST_BIO)
2402 osd_req_op_extent_osd_data_bio(osd_req, 1,
2403 orig_request->bio_list, length);
2405 osd_req_op_extent_osd_data_pages(osd_req, 1,
2406 orig_request->pages, length,
2407 offset & ~PAGE_MASK, false, false);
2409 rbd_osd_req_format_write(orig_request);
2411 /* All set, send it off. */
2413 orig_request->callback = rbd_img_obj_copyup_callback;
2414 osdc = &rbd_dev->rbd_client->client->osdc;
2415 img_result = rbd_obj_request_submit(osdc, orig_request);
2419 /* Record the error code and complete the request */
2421 orig_request->result = img_result;
2422 orig_request->xferred = 0;
2423 obj_request_done_set(orig_request);
2424 rbd_obj_request_complete(orig_request);
2428 * Read from the parent image the range of data that covers the
2429 * entire target of the given object request. This is used for
2430 * satisfying a layered image write request when the target of an
2431 * object request from the image request does not exist.
2433 * A page array big enough to hold the returned data is allocated
2434 * and supplied to rbd_img_request_fill() as the "data descriptor."
2435 * When the read completes, this page array will be transferred to
2436 * the original object request for the copyup operation.
2438 * If an error occurs, record it as the result of the original
2439 * object request and mark it done so it gets completed.
2441 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2443 struct rbd_img_request *img_request = NULL;
2444 struct rbd_img_request *parent_request = NULL;
2445 struct rbd_device *rbd_dev;
2448 struct page **pages = NULL;
2452 rbd_assert(obj_request_img_data_test(obj_request));
2453 rbd_assert(obj_request_type_valid(obj_request->type));
2455 img_request = obj_request->img_request;
2456 rbd_assert(img_request != NULL);
2457 rbd_dev = img_request->rbd_dev;
2458 rbd_assert(rbd_dev->parent != NULL);
2461 * Determine the byte range covered by the object in the
2462 * child image to which the original request was to be sent.
2464 img_offset = obj_request->img_offset - obj_request->offset;
2465 length = (u64)1 << rbd_dev->header.obj_order;
2468 * There is no defined parent data beyond the parent
2469 * overlap, so limit what we read at that boundary if necessary.
2472 if (img_offset + length > rbd_dev->parent_overlap) {
2473 rbd_assert(img_offset < rbd_dev->parent_overlap);
2474 length = rbd_dev->parent_overlap - img_offset;
2478 * Allocate a page array big enough to receive the data read from the parent.
2481 page_count = (u32)calc_pages_for(0, length);
2482 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2483 if (IS_ERR(pages)) {
2484 result = PTR_ERR(pages);
2490 parent_request = rbd_parent_request_create(obj_request,
2491 img_offset, length);
2492 if (!parent_request)
2495 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2498 parent_request->copyup_pages = pages;
2499 parent_request->copyup_page_count = page_count;
2501 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2502 result = rbd_img_request_submit(parent_request);
2506 parent_request->copyup_pages = NULL;
2507 parent_request->copyup_page_count = 0;
2508 parent_request->obj_request = NULL;
2509 rbd_obj_request_put(obj_request);
2512 ceph_release_page_vector(pages, page_count);
2514 rbd_img_request_put(parent_request);
2515 obj_request->result = result;
2516 obj_request->xferred = 0;
2517 obj_request_done_set(obj_request);
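/*
 * Sketch (assumptions labelled): how rbd_img_obj_parent_read_full() above
 * clamps the parent read. There is no parent data beyond the parent
 * overlap, so a full-object read starting at img_offset is truncated at
 * that boundary; the caller guarantees img_offset < parent_overlap.
 */
static inline u64 example_parent_read_length(u64 img_offset, u64 obj_size,
					     u64 parent_overlap)
{
	if (img_offset + obj_size > parent_overlap)
		return parent_overlap - img_offset;
	return obj_size;
}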
2522 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2524 struct rbd_obj_request *orig_request;
2525 struct rbd_device *rbd_dev;
2528 rbd_assert(!obj_request_img_data_test(obj_request));
2531 * All we need from the object request is the original
2532 * request and the result of the STAT op. Grab those, then
2533 * we're done with the request.
2535 orig_request = obj_request->obj_request;
2536 obj_request->obj_request = NULL;
2537 rbd_obj_request_put(orig_request);
2538 rbd_assert(orig_request);
2539 rbd_assert(orig_request->img_request);
2541 result = obj_request->result;
2542 obj_request->result = 0;
2544 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2545 obj_request, orig_request, result,
2546 obj_request->xferred, obj_request->length);
2547 rbd_obj_request_put(obj_request);
2550 * If the overlap has become 0 (most likely because the
2551 * image has been flattened) we need to free the pages
2552 * and re-submit the original write request.
2554 rbd_dev = orig_request->img_request->rbd_dev;
2555 if (!rbd_dev->parent_overlap) {
2556 struct ceph_osd_client *osdc;
2558 osdc = &rbd_dev->rbd_client->client->osdc;
2559 result = rbd_obj_request_submit(osdc, orig_request);
2565 * Our only purpose here is to determine whether the object
2566 * exists, and we don't want to treat the non-existence as
2567 * an error. If something else comes back, transfer the
2568 * error to the original request and complete it now.
2571 obj_request_existence_set(orig_request, true);
2572 } else if (result == -ENOENT) {
2573 obj_request_existence_set(orig_request, false);
2574 } else if (result) {
2575 orig_request->result = result;
2580 * Resubmit the original request now that we have recorded
2581 * whether the target object exists.
2583 orig_request->result = rbd_img_obj_request_submit(orig_request);
2585 if (orig_request->result)
2586 rbd_obj_request_complete(orig_request);
2589 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2591 struct rbd_obj_request *stat_request;
2592 struct rbd_device *rbd_dev;
2593 struct ceph_osd_client *osdc;
2594 struct page **pages = NULL;
2600 * The response data for a STAT call consists of:
2601 *     le64 length;
2602 *     struct {
2603 *         le32 tv_sec;
2604 *         le32 tv_nsec;
2605 *     } mtime;
2607 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2608 page_count = (u32)calc_pages_for(0, size);
2609 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2611 return PTR_ERR(pages);
2614 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2619 rbd_obj_request_get(obj_request);
2620 stat_request->obj_request = obj_request;
2621 stat_request->pages = pages;
2622 stat_request->page_count = page_count;
2624 rbd_assert(obj_request->img_request);
2625 rbd_dev = obj_request->img_request->rbd_dev;
2626 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2628 if (!stat_request->osd_req)
2630 stat_request->callback = rbd_img_obj_exists_callback;
2632 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2633 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2635 rbd_osd_req_format_read(stat_request);
2637 osdc = &rbd_dev->rbd_client->client->osdc;
2638 ret = rbd_obj_request_submit(osdc, stat_request);
2641 rbd_obj_request_put(obj_request);
2646 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2648 struct rbd_img_request *img_request;
2649 struct rbd_device *rbd_dev;
2652 rbd_assert(obj_request_img_data_test(obj_request));
2654 img_request = obj_request->img_request;
2655 rbd_assert(img_request);
2656 rbd_dev = img_request->rbd_dev;
2659 * Only writes to layered images need special handling.
2660 * Reads and non-layered writes are simple object requests.
2661 * Layered writes that start beyond the end of the overlap
2662 * with the parent have no parent data, so they too are
2663 * simple object requests. Finally, if the target object is
2664 * known to already exist, its parent data has already been
2665 * copied, so a write to the object can also be handled as a
2666 * simple object request.
2668 if (!img_request_write_test(img_request) ||
2669 !img_request_layered_test(img_request) ||
2670 rbd_dev->parent_overlap <= obj_request->img_offset ||
2671 ((known = obj_request_known_test(obj_request)) &&
2672 obj_request_exists_test(obj_request))) {
2674 struct rbd_device *rbd_dev;
2675 struct ceph_osd_client *osdc;
2677 rbd_dev = obj_request->img_request->rbd_dev;
2678 osdc = &rbd_dev->rbd_client->client->osdc;
2680 return rbd_obj_request_submit(osdc, obj_request);
2684 * It's a layered write. The target object might exist but
2685 * we may not know that yet. If we know it doesn't exist,
2686 * start by reading the data for the full target object from
2687 * the parent so we can use it for a copyup to the target.
2690 return rbd_img_obj_parent_read_full(obj_request);
2692 /* We don't know whether the target exists. Go find out. */
2694 return rbd_img_obj_exists_submit(obj_request);
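/*
 * Sketch: the routing decision made by rbd_img_obj_request_submit()
 * above, pulled out as a predicate for illustration. Only a write to a
 * layered image that lands inside the parent overlap, and whose target
 * object is not known to exist, needs the existence-check/copyup path;
 * everything else is a simple object request.
 */
static inline bool example_needs_copyup_path(bool write_request, bool layered,
					     u64 parent_overlap, u64 img_offset,
					     bool known, bool exists)
{
	return write_request && layered &&
	       img_offset < parent_overlap &&
	       !(known && exists);
}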
2697 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2699 struct rbd_obj_request *obj_request;
2700 struct rbd_obj_request *next_obj_request;
2702 dout("%s: img %p\n", __func__, img_request);
2703 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2706 ret = rbd_img_obj_request_submit(obj_request);
2714 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2716 struct rbd_obj_request *obj_request;
2717 struct rbd_device *rbd_dev;
2722 rbd_assert(img_request_child_test(img_request));
2724 /* First get what we need from the image request and release it */
2726 obj_request = img_request->obj_request;
2727 img_xferred = img_request->xferred;
2728 img_result = img_request->result;
2729 rbd_img_request_put(img_request);
2732 * If the overlap has become 0 (most likely because the
2733 * image has been flattened) we need to re-submit the original request.
2736 rbd_assert(obj_request);
2737 rbd_assert(obj_request->img_request);
2738 rbd_dev = obj_request->img_request->rbd_dev;
2739 if (!rbd_dev->parent_overlap) {
2740 struct ceph_osd_client *osdc;
2742 osdc = &rbd_dev->rbd_client->client->osdc;
2743 img_result = rbd_obj_request_submit(osdc, obj_request);
2748 obj_request->result = img_result;
2749 if (obj_request->result)
2753 * We need to zero anything beyond the parent overlap
2754 * boundary. Since rbd_img_obj_request_read_callback()
2755 * will zero anything beyond the end of a short read, an
2756 * easy way to do this is to pretend the data from the
2757 * parent came up short--ending at the overlap boundary.
2759 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2760 obj_end = obj_request->img_offset + obj_request->length;
2761 if (obj_end > rbd_dev->parent_overlap) {
2764 if (obj_request->img_offset < rbd_dev->parent_overlap)
2765 xferred = rbd_dev->parent_overlap -
2766 obj_request->img_offset;
2768 obj_request->xferred = min(img_xferred, xferred);
2770 obj_request->xferred = img_xferred;
2773 rbd_img_obj_request_read_callback(obj_request);
2774 rbd_obj_request_complete(obj_request);
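/*
 * Sketch of the clamping done just above: pretend the parent read ended
 * at the overlap boundary, so that anything beyond it gets zeroed by
 * rbd_img_obj_request_read_callback().
 */
static inline u64 example_clamp_parent_xferred(u64 img_offset, u64 length,
					       u64 parent_overlap,
					       u64 img_xferred)
{
	u64 xferred = 0;

	if (img_offset + length <= parent_overlap)
		return img_xferred;	/* fully inside the overlap */
	if (img_offset < parent_overlap)
		xferred = parent_overlap - img_offset;
	return min(img_xferred, xferred);
}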
2777 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2779 struct rbd_img_request *img_request;
2782 rbd_assert(obj_request_img_data_test(obj_request));
2783 rbd_assert(obj_request->img_request != NULL);
2784 rbd_assert(obj_request->result == (s32) -ENOENT);
2785 rbd_assert(obj_request_type_valid(obj_request->type));
2787 /* rbd_read_finish(obj_request, obj_request->length); */
2788 img_request = rbd_parent_request_create(obj_request,
2789 obj_request->img_offset,
2790 obj_request->length);
2795 if (obj_request->type == OBJ_REQUEST_BIO)
2796 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2797 obj_request->bio_list);
2799 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2800 obj_request->pages);
2804 img_request->callback = rbd_img_parent_read_callback;
2805 result = rbd_img_request_submit(img_request);
2812 rbd_img_request_put(img_request);
2813 obj_request->result = result;
2814 obj_request->xferred = 0;
2815 obj_request_done_set(obj_request);
2818 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2820 struct rbd_obj_request *obj_request;
2821 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2824 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2825 OBJ_REQUEST_NODATA);
2830 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2831 if (!obj_request->osd_req)
2834 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2836 rbd_osd_req_format_read(obj_request);
2838 ret = rbd_obj_request_submit(osdc, obj_request);
2841 ret = rbd_obj_request_wait(obj_request);
2843 rbd_obj_request_put(obj_request);
2848 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2850 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2856 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2857 rbd_dev->header_name, (unsigned long long)notify_id,
2858 (unsigned int)opcode);
2859 ret = rbd_dev_refresh(rbd_dev);
2861 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2863 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2867 * Request sync osd watch/unwatch. The value of "start" determines
2868 * whether a watch request is being initiated or torn down.
2870 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2872 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2873 struct rbd_obj_request *obj_request;
2876 rbd_assert(start ^ !!rbd_dev->watch_event);
2877 rbd_assert(start ^ !!rbd_dev->watch_request);
2880 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2881 &rbd_dev->watch_event);
2884 rbd_assert(rbd_dev->watch_event != NULL);
2888 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2889 OBJ_REQUEST_NODATA);
2893 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2894 if (!obj_request->osd_req)
2898 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2900 ceph_osdc_unregister_linger_request(osdc,
2901 rbd_dev->watch_request->osd_req);
2903 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2904 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2905 rbd_osd_req_format_write(obj_request);
2907 ret = rbd_obj_request_submit(osdc, obj_request);
2910 ret = rbd_obj_request_wait(obj_request);
2913 ret = obj_request->result;
2918 * A watch request is set to linger, so the underlying osd
2919 * request won't go away until we unregister it. We retain
2920 * a pointer to the object request during that time (in
2921 * rbd_dev->watch_request), so we'll keep a reference to
2922 * it. We'll drop that reference (below) after we've unregistered it.
2926 rbd_dev->watch_request = obj_request;
2931 /* We have successfully torn down the watch request */
2933 rbd_obj_request_put(rbd_dev->watch_request);
2934 rbd_dev->watch_request = NULL;
2936 /* Cancel the event if we're tearing down, or on error */
2937 ceph_osdc_cancel_event(rbd_dev->watch_event);
2938 rbd_dev->watch_event = NULL;
2940 rbd_obj_request_put(obj_request);
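/*
 * Hypothetical usage sketch for rbd_dev_header_watch_sync(): a watch is
 * registered while the image is mapped and torn down when it is
 * unmapped; header-change notifications arrive via rbd_watch_cb().
 */
static inline int example_watch_lifetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_header_watch_sync(rbd_dev, true);	/* register */
	if (ret)
		return ret;
	/* ... image is mapped and in use ... */
	return rbd_dev_header_watch_sync(rbd_dev, false);	/* tear down */
}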
2946 * Synchronous osd object method call. Returns the number of bytes
2947 * returned in the inbound buffer, or a negative error code.
2949 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2950 const char *object_name,
2951 const char *class_name,
2952 const char *method_name,
2953 const void *outbound,
2954 size_t outbound_size,
2956 size_t inbound_size)
2958 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2959 struct rbd_obj_request *obj_request;
2960 struct page **pages;
2965 * Method calls are ultimately read operations. The result
2966 * should be placed into the inbound buffer provided. They
2967 * also supply outbound data--parameters for the object
2968 * method. Currently if this is present it will be a snapshot id.
2971 page_count = (u32)calc_pages_for(0, inbound_size);
2972 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2974 return PTR_ERR(pages);
2977 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2982 obj_request->pages = pages;
2983 obj_request->page_count = page_count;
2985 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2986 if (!obj_request->osd_req)
2989 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2990 class_name, method_name);
2991 if (outbound_size) {
2992 struct ceph_pagelist *pagelist;
2994 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2998 ceph_pagelist_init(pagelist);
2999 ceph_pagelist_append(pagelist, outbound, outbound_size);
3000 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3003 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3004 obj_request->pages, inbound_size,
3006 rbd_osd_req_format_read(obj_request);
3008 ret = rbd_obj_request_submit(osdc, obj_request);
3011 ret = rbd_obj_request_wait(obj_request);
3015 ret = obj_request->result;
3019 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3020 ret = (int)obj_request->xferred;
3021 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3024 rbd_obj_request_put(obj_request);
3026 ceph_release_page_vector(pages, page_count);
3031 static void rbd_request_fn(struct request_queue *q)
3032 __releases(q->queue_lock) __acquires(q->queue_lock)
3034 struct rbd_device *rbd_dev = q->queuedata;
3035 bool read_only = rbd_dev->mapping.read_only;
3039 while ((rq = blk_fetch_request(q))) {
3040 bool write_request = rq_data_dir(rq) == WRITE;
3041 struct rbd_img_request *img_request;
3045 /* Ignore any non-FS requests that filter through. */
3047 if (rq->cmd_type != REQ_TYPE_FS) {
3048 dout("%s: non-fs request type %d\n", __func__,
3049 (int) rq->cmd_type);
3050 __blk_end_request_all(rq, 0);
3054 /* Ignore/skip any zero-length requests */
3056 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3057 length = (u64) blk_rq_bytes(rq);
3060 dout("%s: zero-length request\n", __func__);
3061 __blk_end_request_all(rq, 0);
3065 spin_unlock_irq(q->queue_lock);
3067 /* Disallow writes to a read-only device */
3069 if (write_request) {
3073 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3077 * Quit early if the mapped snapshot no longer
3078 * exists. It's still possible the snapshot will
3079 * have disappeared by the time our request arrives
3080 * at the osd, but there's no sense in sending it if we already know.
3083 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3084 dout("request for non-existent snapshot");
3085 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3091 if (offset && length > U64_MAX - offset + 1) {
3092 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3094 goto end_request; /* Shouldn't happen */
3098 if (offset + length > rbd_dev->mapping.size) {
3099 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3100 offset, length, rbd_dev->mapping.size);
3105 img_request = rbd_img_request_create(rbd_dev, offset, length,
3110 img_request->rq = rq;
3112 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3115 result = rbd_img_request_submit(img_request);
3117 rbd_img_request_put(img_request);
3119 spin_lock_irq(q->queue_lock);
3121 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3122 write_request ? "write" : "read",
3123 length, offset, result);
3125 __blk_end_request_all(rq, result);
3131 * rbd_merge_bvec() is a queue callback. It makes sure that we don't create
3132 * a bio that spans multiple osd objects. One exception is single-page bios,
3133 * which we handle later in bio_chain_clone_range().
3135 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3136 struct bio_vec *bvec)
3138 struct rbd_device *rbd_dev = q->queuedata;
3139 sector_t sector_offset;
3140 sector_t sectors_per_obj;
3141 sector_t obj_sector_offset;
3145 * The bio start sector is partition-relative; convert it to a sector
3146 * offset on the whole device, then find how far into its rbd object it falls.
3149 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3150 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3151 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3154 * Compute the number of bytes from that offset to the end
3155 * of the object. Account for what's already used by the bio.
3157 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3158 if (ret > bmd->bi_size)
3159 ret -= bmd->bi_size;
3164 * Don't send back more than was asked for. And if the bio
3165 * was empty, let the whole thing through because: "Note
3166 * that a block device *must* allow a single page to be
3167 * added to an empty bio."
3169 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3170 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3171 ret = (int) bvec->bv_len;
3176 static void rbd_free_disk(struct rbd_device *rbd_dev)
3178 struct gendisk *disk = rbd_dev->disk;
3183 rbd_dev->disk = NULL;
3184 if (disk->flags & GENHD_FL_UP) {
3187 blk_cleanup_queue(disk->queue);
3192 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3193 const char *object_name,
3194 u64 offset, u64 length, void *buf)
3197 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3198 struct rbd_obj_request *obj_request;
3199 struct page **pages = NULL;
3204 page_count = (u32) calc_pages_for(offset, length);
3205 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3207 ret = PTR_ERR(pages);
3210 obj_request = rbd_obj_request_create(object_name, offset, length,
3215 obj_request->pages = pages;
3216 obj_request->page_count = page_count;
3218 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3219 if (!obj_request->osd_req)
3222 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3223 offset, length, 0, 0);
3224 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3226 obj_request->length,
3227 obj_request->offset & ~PAGE_MASK,
3229 rbd_osd_req_format_read(obj_request);
3231 ret = rbd_obj_request_submit(osdc, obj_request);
3234 ret = rbd_obj_request_wait(obj_request);
3238 ret = obj_request->result;
3242 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3243 size = (size_t) obj_request->xferred;
3244 ceph_copy_from_page_vector(pages, buf, 0, size);
3245 rbd_assert(size <= (size_t)INT_MAX);
3249 rbd_obj_request_put(obj_request);
3251 ceph_release_page_vector(pages, page_count);
3257 * Read the complete header for the given rbd device. On successful
3258 * return, the rbd_dev->header field will contain up-to-date
3259 * information about the image.
3261 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3263 struct rbd_image_header_ondisk *ondisk = NULL;
3270 * The complete header will include an array of its 64-bit
3271 * snapshot ids, followed by the names of those snapshots as
3272 * a contiguous block of NUL-terminated strings. Note that
3273 * the number of snapshots could change by the time we read
3274 * it in, in which case we re-read it.
3281 size = sizeof (*ondisk);
3282 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3284 ondisk = kmalloc(size, GFP_KERNEL);
3288 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3292 if ((size_t)ret < size) {
3294 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3298 if (!rbd_dev_ondisk_valid(ondisk)) {
3300 rbd_warn(rbd_dev, "invalid header");
3304 names_size = le64_to_cpu(ondisk->snap_names_len);
3305 want_count = snap_count;
3306 snap_count = le32_to_cpu(ondisk->snap_count);
3307 } while (snap_count != want_count);
3309 ret = rbd_header_from_disk(rbd_dev, ondisk);
3317 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3318 * has disappeared from the (just updated) snapshot context.
3320 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3324 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3327 snap_id = rbd_dev->spec->snap_id;
3328 if (snap_id == CEPH_NOSNAP)
3331 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3332 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3335 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3341 * Don't hold the lock while doing disk operations,
3342 * or lock ordering will conflict with the bdev mutex via:
3343 * rbd_add() -> blkdev_get() -> rbd_open()
3345 spin_lock_irq(&rbd_dev->lock);
3346 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3347 spin_unlock_irq(&rbd_dev->lock);
3349 * If the device is being removed, rbd_dev->disk has
3350 * been destroyed, so don't try to update its size
3353 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3354 dout("setting size to %llu sectors", (unsigned long long)size);
3355 set_capacity(rbd_dev->disk, size);
3356 revalidate_disk(rbd_dev->disk);
3360 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3365 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3366 down_write(&rbd_dev->header_rwsem);
3367 mapping_size = rbd_dev->mapping.size;
3368 if (rbd_dev->image_format == 1)
3369 ret = rbd_dev_v1_header_info(rbd_dev);
3371 ret = rbd_dev_v2_header_info(rbd_dev);
3373 /* If it's a mapped snapshot, validate its EXISTS flag */
3375 rbd_exists_validate(rbd_dev);
3376 up_write(&rbd_dev->header_rwsem);
3378 if (mapping_size != rbd_dev->mapping.size) {
3379 rbd_dev_update_size(rbd_dev);
3385 static int rbd_init_disk(struct rbd_device *rbd_dev)
3387 struct gendisk *disk;
3388 struct request_queue *q;
3391 /* create gendisk info */
3392 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3396 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3398 disk->major = rbd_dev->major;
3399 disk->first_minor = 0;
3400 disk->fops = &rbd_bd_ops;
3401 disk->private_data = rbd_dev;
3403 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3407 /* We use the default size, but let's be explicit about it. */
3408 blk_queue_physical_block_size(q, SECTOR_SIZE);
3410 /* set io sizes to object size */
3411 segment_size = rbd_obj_bytes(&rbd_dev->header);
3412 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3413 blk_queue_max_segment_size(q, segment_size);
3414 blk_queue_io_min(q, segment_size);
3415 blk_queue_io_opt(q, segment_size);
3417 blk_queue_merge_bvec(q, rbd_merge_bvec);
3420 q->queuedata = rbd_dev;
3422 rbd_dev->disk = disk;
3435 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3437 return container_of(dev, struct rbd_device, dev);
3440 static ssize_t rbd_size_show(struct device *dev,
3441 struct device_attribute *attr, char *buf)
3443 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3445 return sprintf(buf, "%llu\n",
3446 (unsigned long long)rbd_dev->mapping.size);
3450 * Note this shows the features for whatever's mapped, which is not
3451 * necessarily the base image.
3453 static ssize_t rbd_features_show(struct device *dev,
3454 struct device_attribute *attr, char *buf)
3456 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3458 return sprintf(buf, "0x%016llx\n",
3459 (unsigned long long)rbd_dev->mapping.features);
3462 static ssize_t rbd_major_show(struct device *dev,
3463 struct device_attribute *attr, char *buf)
3465 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3468 return sprintf(buf, "%d\n", rbd_dev->major);
3470 return sprintf(buf, "(none)\n");
3474 static ssize_t rbd_client_id_show(struct device *dev,
3475 struct device_attribute *attr, char *buf)
3477 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3479 return sprintf(buf, "client%lld\n",
3480 ceph_client_id(rbd_dev->rbd_client->client));
3483 static ssize_t rbd_pool_show(struct device *dev,
3484 struct device_attribute *attr, char *buf)
3486 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3488 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3491 static ssize_t rbd_pool_id_show(struct device *dev,
3492 struct device_attribute *attr, char *buf)
3494 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3496 return sprintf(buf, "%llu\n",
3497 (unsigned long long) rbd_dev->spec->pool_id);
3500 static ssize_t rbd_name_show(struct device *dev,
3501 struct device_attribute *attr, char *buf)
3503 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3505 if (rbd_dev->spec->image_name)
3506 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3508 return sprintf(buf, "(unknown)\n");
3511 static ssize_t rbd_image_id_show(struct device *dev,
3512 struct device_attribute *attr, char *buf)
3514 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3516 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3520 * Shows the name of the currently-mapped snapshot (or
3521 * RBD_SNAP_HEAD_NAME for the base image).
3523 static ssize_t rbd_snap_show(struct device *dev,
3524 struct device_attribute *attr,
3527 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3529 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3533 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3534 * for the parent image. If there is no parent, simply shows
3535 * "(no parent image)".
3537 static ssize_t rbd_parent_show(struct device *dev,
3538 struct device_attribute *attr,
3541 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3542 struct rbd_spec *spec = rbd_dev->parent_spec;
3547 return sprintf(buf, "(no parent image)\n");
3549 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3550 (unsigned long long) spec->pool_id, spec->pool_name);
3555 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3556 spec->image_name ? spec->image_name : "(unknown)");
3561 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3562 (unsigned long long) spec->snap_id, spec->snap_name);
3567 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3572 return (ssize_t) (bufp - buf);
3575 static ssize_t rbd_image_refresh(struct device *dev,
3576 struct device_attribute *attr,
3580 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3583 ret = rbd_dev_refresh(rbd_dev);
3585 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3587 return ret < 0 ? ret : size;
3590 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3591 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3592 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3593 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3594 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3595 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3596 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3597 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3598 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3599 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3600 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3602 static struct attribute *rbd_attrs[] = {
3603 &dev_attr_size.attr,
3604 &dev_attr_features.attr,
3605 &dev_attr_major.attr,
3606 &dev_attr_client_id.attr,
3607 &dev_attr_pool.attr,
3608 &dev_attr_pool_id.attr,
3609 &dev_attr_name.attr,
3610 &dev_attr_image_id.attr,
3611 &dev_attr_current_snap.attr,
3612 &dev_attr_parent.attr,
3613 &dev_attr_refresh.attr,
3617 static struct attribute_group rbd_attr_group = {
3621 static const struct attribute_group *rbd_attr_groups[] = {
3626 static void rbd_sysfs_dev_release(struct device *dev)
3630 static struct device_type rbd_device_type = {
3632 .groups = rbd_attr_groups,
3633 .release = rbd_sysfs_dev_release,
3636 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3638 kref_get(&spec->kref);
3643 static void rbd_spec_free(struct kref *kref);
3644 static void rbd_spec_put(struct rbd_spec *spec)
3647 kref_put(&spec->kref, rbd_spec_free);
3650 static struct rbd_spec *rbd_spec_alloc(void)
3652 struct rbd_spec *spec;
3654 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3657 kref_init(&spec->kref);
3662 static void rbd_spec_free(struct kref *kref)
3664 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3666 kfree(spec->pool_name);
3667 kfree(spec->image_id);
3668 kfree(spec->image_name);
3669 kfree(spec->snap_name);
3673 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3674 struct rbd_spec *spec)
3676 struct rbd_device *rbd_dev;
3678 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3682 spin_lock_init(&rbd_dev->lock);
3684 atomic_set(&rbd_dev->parent_ref, 0);
3685 INIT_LIST_HEAD(&rbd_dev->node);
3686 init_rwsem(&rbd_dev->header_rwsem);
3688 rbd_dev->spec = spec;
3689 rbd_dev->rbd_client = rbdc;
3691 /* Initialize the layout used for all rbd requests */
3693 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3694 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3695 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3696 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3701 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3703 rbd_put_client(rbd_dev->rbd_client);
3704 rbd_spec_put(rbd_dev->spec);
3709 * Get the size and object order for an image snapshot, or if
3710 * snap_id is CEPH_NOSNAP, gets this information for the base image.
3713 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3714 u8 *order, u64 *snap_size)
3716 __le64 snapid = cpu_to_le64(snap_id);
3721 } __attribute__ ((packed)) size_buf = { 0 };
3723 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3725 &snapid, sizeof (snapid),
3726 &size_buf, sizeof (size_buf));
3727 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3730 if (ret < sizeof (size_buf))
3734 *order = size_buf.order;
3735 dout(" order %u", (unsigned int)*order);
3737 *snap_size = le64_to_cpu(size_buf.size);
3739 dout(" snap_id 0x%016llx snap_size = %llu\n",
3740 (unsigned long long)snap_id,
3741 (unsigned long long)*snap_size);
3746 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3748 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3749 &rbd_dev->header.obj_order,
3750 &rbd_dev->header.image_size);
3753 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3759 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3763 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3764 "rbd", "get_object_prefix", NULL, 0,
3765 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3766 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3771 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3772 p + ret, NULL, GFP_NOIO);
3775 if (IS_ERR(rbd_dev->header.object_prefix)) {
3776 ret = PTR_ERR(rbd_dev->header.object_prefix);
3777 rbd_dev->header.object_prefix = NULL;
3779 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3787 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3790 __le64 snapid = cpu_to_le64(snap_id);
3794 } __attribute__ ((packed)) features_buf = { 0 };
3798 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3799 "rbd", "get_features",
3800 &snapid, sizeof (snapid),
3801 &features_buf, sizeof (features_buf));
3802 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3805 if (ret < sizeof (features_buf))
3808 incompat = le64_to_cpu(features_buf.incompat);
3809 if (incompat & ~RBD_FEATURES_SUPPORTED)
3812 *snap_features = le64_to_cpu(features_buf.features);
3814 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3815 (unsigned long long)snap_id,
3816 (unsigned long long)*snap_features,
3817 (unsigned long long)le64_to_cpu(features_buf.incompat));
3822 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3824 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3825 &rbd_dev->header.features);
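/*
 * Sketch: the incompatible-feature check performed in
 * _rbd_dev_v2_snap_features() above. An image advertising an
 * incompatible feature bit this client doesn't know about must not be
 * mapped (the error value here mirrors the driver's refusal to probe).
 */
static inline int example_check_incompat_features(u64 incompat)
{
	return (incompat & ~RBD_FEATURES_SUPPORTED) ? -ENXIO : 0;
}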
3828 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3830 struct rbd_spec *parent_spec;
3832 void *reply_buf = NULL;
3842 parent_spec = rbd_spec_alloc();
3846 size = sizeof (__le64) + /* pool_id */
3847 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3848 sizeof (__le64) + /* snap_id */
3849 sizeof (__le64); /* overlap */
3850 reply_buf = kmalloc(size, GFP_KERNEL);
3856 snapid = cpu_to_le64(CEPH_NOSNAP);
3857 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3858 "rbd", "get_parent",
3859 &snapid, sizeof (snapid),
3861 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3866 end = reply_buf + ret;
3868 ceph_decode_64_safe(&p, end, pool_id, out_err);
3869 if (pool_id == CEPH_NOPOOL) {
3871 * Either the parent never existed, or we have a
3872 * record of it but the image got flattened so it no
3873 * longer has a parent. When the parent of a
3874 * layered image disappears we immediately set the
3875 * overlap to 0. The effect of this is that all new
3876 * requests will be treated as if the image had no
3879 if (rbd_dev->parent_overlap) {
3880 rbd_dev->parent_overlap = 0;
3882 rbd_dev_parent_put(rbd_dev);
3883 pr_info("%s: clone image has been flattened\n",
3884 rbd_dev->disk->disk_name);
3887 goto out; /* No parent? No problem. */
3890 /* The ceph file layout needs to fit pool id in 32 bits */
3893 if (pool_id > (u64)U32_MAX) {
3894 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3895 (unsigned long long)pool_id, U32_MAX);
3899 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3900 if (IS_ERR(image_id)) {
3901 ret = PTR_ERR(image_id);
3904 ceph_decode_64_safe(&p, end, snap_id, out_err);
3905 ceph_decode_64_safe(&p, end, overlap, out_err);
3908 * The parent won't change (except when the clone is
3909 * flattened, already handled that). So we only need to
3910 * record the parent spec if we have not already done so.
3912 if (!rbd_dev->parent_spec) {
3913 parent_spec->pool_id = pool_id;
3914 parent_spec->image_id = image_id;
3915 parent_spec->snap_id = snap_id;
3916 rbd_dev->parent_spec = parent_spec;
3917 parent_spec = NULL; /* rbd_dev now owns this */
3921 * We always update the parent overlap. If it's zero we
3922 * treat it specially.
3924 rbd_dev->parent_overlap = overlap;
3928 /* A null parent_spec indicates it's the initial probe */
3932 * The overlap has become zero, so the clone
3933 * must have been resized down to 0 at some
3934 * point. Treat this the same as a flatten.
3936 rbd_dev_parent_put(rbd_dev);
3937 pr_info("%s: clone image now standalone\n",
3938 rbd_dev->disk->disk_name);
3941 * For the initial probe, if we find the
3942 * overlap is zero we just pretend there was no parent image.
3945 rbd_warn(rbd_dev, "ignoring parent of "
3946 "clone with overlap 0\n");
3953 rbd_spec_put(parent_spec);
3958 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3962 __le64 stripe_count;
3963 } __attribute__ ((packed)) striping_info_buf = { 0 };
3964 size_t size = sizeof (striping_info_buf);
3971 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3972 "rbd", "get_stripe_unit_count", NULL, 0,
3973 (char *)&striping_info_buf, size);
3974 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3981 * We don't actually support the "fancy striping" feature
3982 * (STRIPINGV2) yet, but if the striping sizes are the
3983 * defaults the behavior is the same as before. So find
3984 * out, and only fail if the image has non-default values.
3987 obj_size = (u64)1 << rbd_dev->header.obj_order;
3988 p = &striping_info_buf;
3989 stripe_unit = ceph_decode_64(&p);
3990 if (stripe_unit != obj_size) {
3991 rbd_warn(rbd_dev, "unsupported stripe unit "
3992 "(got %llu want %llu)",
3993 stripe_unit, obj_size);
3996 stripe_count = ceph_decode_64(&p);
3997 if (stripe_count != 1) {
3998 rbd_warn(rbd_dev, "unsupported stripe count "
3999 "(got %llu want 1)", stripe_count);
4002 rbd_dev->header.stripe_unit = stripe_unit;
4003 rbd_dev->header.stripe_count = stripe_count;
4008 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4010 size_t image_id_size;
4015 void *reply_buf = NULL;
4017 char *image_name = NULL;
4020 rbd_assert(!rbd_dev->spec->image_name);
4022 len = strlen(rbd_dev->spec->image_id);
4023 image_id_size = sizeof (__le32) + len;
4024 image_id = kmalloc(image_id_size, GFP_KERNEL);
4029 end = image_id + image_id_size;
4030 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4032 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4033 reply_buf = kmalloc(size, GFP_KERNEL);
4037 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4038 "rbd", "dir_get_name",
4039 image_id, image_id_size,
4044 end = reply_buf + ret;
4046 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4047 if (IS_ERR(image_name))
4050 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4058 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4060 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4061 const char *snap_name;
4064 /* Skip over names until we find the one we are looking for */
4066 snap_name = rbd_dev->header.snap_names;
4067 while (which < snapc->num_snaps) {
4068 if (!strcmp(name, snap_name))
4069 return snapc->snaps[which];
4070 snap_name += strlen(snap_name) + 1;
4076 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4078 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4083 for (which = 0; !found && which < snapc->num_snaps; which++) {
4084 const char *snap_name;
4086 snap_id = snapc->snaps[which];
4087 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4088 if (IS_ERR(snap_name)) {
4089 /* ignore no-longer existing snapshots */
4090 if (PTR_ERR(snap_name) == -ENOENT)
4095 found = !strcmp(name, snap_name);
4098 return found ? snap_id : CEPH_NOSNAP;
4102 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4103 * no snapshot by that name is found, or if an error occurs.
4105 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4107 if (rbd_dev->image_format == 1)
4108 return rbd_v1_snap_id_by_name(rbd_dev, name);
4110 return rbd_v2_snap_id_by_name(rbd_dev, name);
4114 * When an rbd image has a parent image, it is identified by the
4115 * pool, image, and snapshot ids (not names). This function fills
4116 * in the names for those ids. (It's OK if we can't figure out the
4117 * name for an image id, but the pool and snapshot ids should always
4118 * exist and have names.) All names in an rbd spec are dynamically allocated.
4121 * When an image being mapped (not a parent) is probed, we have the
4122 * pool name and pool id, image name and image id, and the snapshot
4123 * name. The only thing we're missing is the snapshot id.
4125 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4127 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4128 struct rbd_spec *spec = rbd_dev->spec;
4129 const char *pool_name;
4130 const char *image_name;
4131 const char *snap_name;
4135 * An image being mapped will have the pool name (etc.), but
4136 * we need to look up the snapshot id.
4138 if (spec->pool_name) {
4139 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4142 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4143 if (snap_id == CEPH_NOSNAP)
4145 spec->snap_id = snap_id;
4147 spec->snap_id = CEPH_NOSNAP;
4153 /* Get the pool name; we have to make our own copy of this */
4155 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4157 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4160 pool_name = kstrdup(pool_name, GFP_KERNEL);
4164 /* Fetch the image name; tolerate failure here */
4166 image_name = rbd_dev_image_name(rbd_dev);
4168 rbd_warn(rbd_dev, "unable to get image name");
4170 /* Look up the snapshot name, and make a copy */
4172 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4173 if (IS_ERR(snap_name)) {
4174 ret = PTR_ERR(snap_name);
4178 spec->pool_name = pool_name;
4179 spec->image_name = image_name;
4180 spec->snap_name = snap_name;
4190 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4199 struct ceph_snap_context *snapc;
4203 * We'll need room for the seq value (maximum snapshot id),
4204 * snapshot count, and array of that many snapshot ids.
4205 * For now we have a fixed upper limit on the number we're
4206 * prepared to receive.
4208 size = sizeof (__le64) + sizeof (__le32) +
4209 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4210 reply_buf = kzalloc(size, GFP_KERNEL);
4214 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4215 "rbd", "get_snapcontext", NULL, 0,
4217 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4222 end = reply_buf + ret;
4224 ceph_decode_64_safe(&p, end, seq, out);
4225 ceph_decode_32_safe(&p, end, snap_count, out);
4228 * Make sure the reported number of snapshot ids wouldn't go
4229 * beyond the end of our buffer. But before checking that,
4230 * make sure the computed size of the snapshot context we
4231 * allocate is representable in a size_t.
4233 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4238 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4242 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4248 for (i = 0; i < snap_count; i++)
4249 snapc->snaps[i] = ceph_decode_64(&p);
4251 ceph_put_snap_context(rbd_dev->header.snapc);
4252 rbd_dev->header.snapc = snapc;
4254 dout(" snap context seq = %llu, snap_count = %u\n",
4255 (unsigned long long)seq, (unsigned int)snap_count);
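/*
 * Sketch: the overflow guard used above before allocating the snapshot
 * context. The allocation is roughly sizeof(struct ceph_snap_context)
 * plus snap_count 64-bit ids, so snap_count must be small enough for
 * that sum to be representable in a size_t.
 */
static inline bool example_snap_count_fits(u32 snap_count)
{
	return snap_count <=
	       (SIZE_MAX - sizeof(struct ceph_snap_context)) / sizeof(u64);
}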
4262 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4273 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4274 reply_buf = kmalloc(size, GFP_KERNEL);
4276 return ERR_PTR(-ENOMEM);
4278 snapid = cpu_to_le64(snap_id);
4279 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4280 "rbd", "get_snapshot_name",
4281 &snapid, sizeof (snapid),
4283 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4285 snap_name = ERR_PTR(ret);
4290 end = reply_buf + ret;
4291 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4292 if (IS_ERR(snap_name))
4295 dout(" snap_id 0x%016llx snap_name = %s\n",
4296 (unsigned long long)snap_id, snap_name);
4303 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4305 bool first_time = rbd_dev->header.object_prefix == NULL;
4308 ret = rbd_dev_v2_image_size(rbd_dev);
4313 ret = rbd_dev_v2_header_onetime(rbd_dev);
4319 * If the image supports layering, get the parent info. We
4320 * need to probe the first time regardless. Thereafter we
4321 * only need to probe if there's a parent, to see if it has
4322 * disappeared due to the mapped image getting flattened.
4324 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4325 (first_time || rbd_dev->parent_spec)) {
4328 ret = rbd_dev_v2_parent_info(rbd_dev);
4333 * Print a warning if this is the initial probe and
4334 * the image has a parent. Don't print it if the
4335 * image now being probed is itself a parent. We
4336 * can tell at this point because we won't know its
4337 * pool name yet (just its pool id).
4339 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4340 if (first_time && warn)
4341 rbd_warn(rbd_dev, "WARNING: kernel layering "
4342 "is EXPERIMENTAL!");
4345 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4346 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4347 rbd_dev->mapping.size = rbd_dev->header.image_size;
4349 ret = rbd_dev_v2_snap_context(rbd_dev);
4350 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4355 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4360 dev = &rbd_dev->dev;
4361 dev->bus = &rbd_bus_type;
4362 dev->type = &rbd_device_type;
4363 dev->parent = &rbd_root_dev;
4364 dev->release = rbd_dev_device_release;
4365 dev_set_name(dev, "%d", rbd_dev->dev_id);
4366 ret = device_register(dev);
4371 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4373 device_unregister(&rbd_dev->dev);
4376 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4379 * Get a unique rbd identifier for the given new rbd_dev, and add
4380 * the rbd_dev to the global list. The minimum rbd id is 1.
4382 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4384 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4386 spin_lock(&rbd_dev_list_lock);
4387 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4388 spin_unlock(&rbd_dev_list_lock);
4389 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4390 (unsigned long long) rbd_dev->dev_id);
4394 * Remove an rbd_dev from the global list, and record that its
4395 * identifier is no longer in use.
4397 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4399 struct list_head *tmp;
4400 int rbd_id = rbd_dev->dev_id;
4403 rbd_assert(rbd_id > 0);
4405 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4406 (unsigned long long) rbd_dev->dev_id);
4407 spin_lock(&rbd_dev_list_lock);
4408 list_del_init(&rbd_dev->node);
4411 * If the id being "put" is not the current maximum, there
4412 * is nothing special we need to do.
4414 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4415 spin_unlock(&rbd_dev_list_lock);
4420 * We need to update the current maximum id. Search the
4421 * list to find out what it is. We're more likely to find
4422 * the maximum at the end, so search the list backward.
4425 list_for_each_prev(tmp, &rbd_dev_list) {
4426 struct rbd_device *rbd_dev;
4428 rbd_dev = list_entry(tmp, struct rbd_device, node);
4429 if (rbd_dev->dev_id > max_id)
4430 max_id = rbd_dev->dev_id;
4432 spin_unlock(&rbd_dev_list_lock);
4435 * The max id could have been updated by rbd_dev_id_get(), in
4436 * which case it now accurately reflects the new maximum.
4437 * Be careful not to overwrite the maximum value in that
4440 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4441 dout(" max dev id has been reset\n");
4445 * Skips over white space at *buf, and updates *buf to point to the
4446 * first found non-space character (if any). Returns the length of
4447 * the token (string of non-white space characters) found. Note
4448 * that *buf must be terminated with '\0'.
4450 static inline size_t next_token(const char **buf)
4453 * These are the characters that produce nonzero for
4454 * isspace() in the "C" and "POSIX" locales.
4456 const char *spaces = " \f\n\r\t\v";
4458 *buf += strspn(*buf, spaces); /* Find start of token */
4460 return strcspn(*buf, spaces); /* Return token length */
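/*
 * Hypothetical usage sketch for next_token(): counting the
 * whitespace-separated tokens in a NUL-terminated buffer. Note that
 * next_token() reports a token's start and length but does not advance
 * past it, so the caller steps over the token itself.
 */
static inline size_t example_count_tokens(const char *buf)
{
	size_t count = 0;
	size_t len;

	while ((len = next_token(&buf)) != 0) {
		buf += len;	/* step over the token just found */
		count++;
	}
	return count;
}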
4464 * Finds the next token in *buf, and if the provided token buffer is
4465 * big enough, copies the found token into it. The result, if
4466 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4467 * must be terminated with '\0' on entry.
4469 * Returns the length of the token found (not including the '\0').
4470 * Return value will be 0 if no token is found, and it will be >=
4471 * token_size if the token would not fit.
4473 * The *buf pointer will be updated to point beyond the end of the
4474 * found token. Note that this occurs even if the token buffer is
4475 * too small to hold it.
4477 static inline size_t copy_token(const char **buf,
4483 len = next_token(buf);
4484 if (len < token_size) {
4485 memcpy(token, *buf, len);
4486 *(token + len) = '\0';
4494 * Finds the next token in *buf, dynamically allocates a buffer big
4495 * enough to hold a copy of it, and copies the token into the new
4496 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4497 * that a duplicate buffer is created even for a zero-length token.
4499 * Returns a pointer to the newly-allocated duplicate, or a null
4500 * pointer if memory for the duplicate was not available. If
4501 * the lenp argument is a non-null pointer, the length of the token
4502 * (not including the '\0') is returned in *lenp.
4504 * If successful, the *buf pointer will be updated to point beyond
4505 * the end of the found token.
4507 * Note: uses GFP_KERNEL for allocation.
4509 static inline char *dup_token(const char **buf, size_t *lenp)
4514 len = next_token(buf);
4515 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4518 *(dup + len) = '\0';
4528 * Parse the options provided for an "rbd add" (i.e., rbd image
4529 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4530 * and the data written is passed here via a NUL-terminated buffer.
4531 * Returns 0 if successful or an error code otherwise.
4533 * The information extracted from these options is recorded in
4534 * the other parameters which return dynamically-allocated structures:
4537 * The address of a pointer that will refer to a ceph options
4538 * structure. Caller must release the returned pointer using
4539 * ceph_destroy_options() when it is no longer needed.
4541 * Address of an rbd options pointer. Fully initialized by
4542 * this function; caller must release with kfree().
4544 * Address of an rbd image specification pointer. Fully
4545 * initialized by this function based on parsed options.
4546 * Caller must release with rbd_spec_put().
4548 * The options passed take this form:
4549 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4552 * A comma-separated list of one or more monitor addresses.
4553 * A monitor address is an ip address, optionally followed
4554 * by a port number (separated by a colon).
4555 * I.e.: ip1[:port1][,ip2[:port2]...]
4557 * A comma-separated list of ceph and/or rbd options.
4559 * The name of the rados pool containing the rbd image.
4561 * The name of the image in that pool to map.
4563 * An optional snapshot name. If provided, the mapping will
4564 * present data from the image at the time that snapshot was
4565 * created. The image head is used if no snapshot name is
4566 * provided. Snapshot mappings are always read-only.
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
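/*
 * Note on the monitor address bounds above: mon_addrs points into
 * the caller's buffer and is not NUL-terminated at the end of its
 * token, so ceph_parse_options() is given an explicit end pointer.
 * Sketch of the arithmetic for an illustrative buffer:
 *
 *	buf       = "1.2.3.4:6789 name=admin mypool myimage"
 *	mon_addrs = buf;  mon_addrs_size = 12 + 1;
 *	end       = mon_addrs + mon_addrs_size - 1;  // just past "6789"
 */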
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
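/*
 * The "get_id" reply decoded above is a ceph encoded string: a
 * little-endian 32-bit length followed by that many bytes.  An
 * illustrative reply for a (made-up) image id of "1018e6b8b4567"
 * would be laid out as:
 *
 *	0d 00 00 00  '1' '0' '1' '8' 'e' '6' 'b' '8' 'b' '4' '5' '6' '7'
 *
 * ceph_extract_encoded_string() returns a NUL-terminated copy of
 * the payload.
 */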
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
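/*
 * Worked example of the set_capacity() arithmetic above: capacity
 * is expressed in 512-byte sectors (SECTOR_SIZE), so a mapping of
 * size 0x40000000 (1 GiB) becomes
 *
 *	0x40000000 / 512 == 2097152 sectors
 *
 * which is what the block layer reports for the new disk.
 */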
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
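/*
 * Two illustrative results (image name and id values are made up):
 * a format 1 image named "myimage" gets header object
 * "myimage.rbd" (RBD_SUFFIX), while a format 2 image whose id is
 * "1018e6b8b4567" gets header object "rbd_header.1018e6b8b4567"
 * (RBD_HEADER_PREFIX).
 */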
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev, true);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping) {
		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
		if (tmp)
			rbd_warn(rbd_dev, "unable to tear down "
					"watch request (%d)\n", tmp);
	}
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
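/*
 * Probe order for a layered (format 2) image, as a sketch: mapping
 * a clone first probes the clone itself (mapping == true, so a
 * watch is established), then rbd_dev_probe_parent() creates an
 * rbd_dev for the parent and re-enters this function with
 * mapping == false, repeating up the chain until an image with no
 * parent_spec is reached.
 */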
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
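/*
 * End-to-end usage sketch (addresses, names, and the assigned
 * device id are all illustrative):
 *
 *	$ echo "1.2.3.4:6789 name=admin mypool myimage" > /sys/bus/rbd/add
 *	$ dmesg | tail -1
 *	rbd0: added with size 0x40000000
 *	$ ls /dev/rbd0
 *
 * Writing the device id back to /sys/bus/rbd/remove tears the
 * mapping down again.
 */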
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
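/*
 * Teardown order sketch: for a chain rbd_dev -> A -> B (B is A's
 * parent and has no parent itself), the inner loop walks to B and
 * releases it first; the next pass of the outer loop then releases
 * A, so the most distant ancestor is always released first.
 */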
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	ret = rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);

	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
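/*
 * Usage sketch: the value written is the numeric device id from the
 * rbd device name (e.g. "0" for /dev/rbd0):
 *
 *	$ echo 0 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the device is still held open.
 */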
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
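/*
 * These caches back the hot allocation paths elsewhere in this
 * driver; a representative (simplified) allocation pattern looks
 * like:
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(rbd_obj_request_cache, obj_request);
 */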
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
5323 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
5324 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5325 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5326 MODULE_DESCRIPTION("rados block device");
5328 /* following authorship retained from original osdblk.c */
5329 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5331 MODULE_LICENSE("GPL");