/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
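
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * together these helpers form a saturating refcount on top of atomic_t.
 * A counter that has dropped to 0 stays at 0, and overflow/underflow
 * are reported as -EINVAL instead of silently wrapping.
 */
static __maybe_unused void rbd_safe_counter_example(void)
{
	atomic_t refs = ATOMIC_INIT(1);

	if (atomic_inc_return_safe(&refs) > 0) {	/* refs: 1 -> 2 */
		/* ... use the referenced object ... */
		if (atomic_dec_return_safe(&refs) < 0)	/* refs: 2 -> 1 */
			pr_warn("reference underflow\n");
	}
}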

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *                       need copyup
 * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
 *        |     ^                              |
 *        v     \------------------------------/
 *      done
 *        ^
 *        |
 * RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_FLAT = 1,
	RBD_OBJ_WRITE_GUARD,
	RBD_OBJ_WRITE_COPYUP,
};
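
/*
 * Illustrative walk-through (a sketch, not from the original source):
 * a guarded write whose stat guard reports -ENOENT reads the parent
 * data first (state COPYUP), then re-submits as copyup + write and
 * completes back in the GUARD state; a write to an already-populated
 * object completes straight from GUARD, and a write with no parent
 * overlap completes from FLAT.
 */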

struct rbd_obj_request {
	struct ceph_object_extent ex;
	union {
		bool			tried_parent;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	spinlock_t		completion_lock;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	struct list_head	object_extents;	/* obj_req.ex structs */
	u32			obj_request_count;
	u32			pending_count;

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
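
/*
 * Usage sketch (illustrative only): walking an image request's object
 * requests with the iterators above.  The _safe variant must be used
 * when entries may be deleted during the walk, as in
 * rbd_img_request_destroy() below.
 */
static __maybe_unused void rbd_for_each_example(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req;

	for_each_obj_request(img_req, obj_req)
		dout("objno %llu covers %llu~%llu\n", obj_req->ex.oe_objno,
		     obj_req->ex.oe_off, obj_req->ex.oe_len);
}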

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64			size;
	u64			features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
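
/*
 * Example (sketch): in single-major mode each device gets
 * 1 << RBD_SINGLE_MAJOR_PART_SHIFT == 16 minors, so dev_id 3 maps to
 * minor 48 (the whole device) with minors 49..63 left for partitions,
 * and minor_to_rbd_dev_id() recovers 3 for any of them (48 >> 4 == 3,
 * 63 >> 4 == 3).
 */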

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR(add, 0200, NULL, rbd_add);
static BUS_ATTR(remove, 0200, NULL, rbd_remove);
static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
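
/*
 * Usage sketch (illustrative only): rbd_warn() picks the most specific
 * identification available for the message prefix, so callers just
 * pass whatever device context they have (or NULL before one exists),
 * e.g.:
 *
 *	rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
 *	rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
 *
 * (The first message is a hypothetical example; the second appears in
 * rbd_get_client() below.)
 */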

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};
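
/*
 * Example (sketch): the per-device tokens above are parsed out of the
 * options field of the string written to /sys/bus/rbd/add, roughly:
 *
 *	echo "1.2.3.4:6789 name=admin,queue_depth=256,read_only,notrim \
 *		rbd myimage -" > /sys/bus/rbd/add
 *
 * Monitor address, pool and image names here are placeholders; see
 * Documentation/ABI/testing/sysfs-bus-rbd for the exact format.
 */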

static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static int wait_for_latest_osdmap(struct ceph_client *client)
{
	u64 newest_epoch;
	int ret;

	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
	if (ret)
		return ret;

	if (client->osdc.osdmap->epoch >= newest_epoch)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
				     client->options->mount_timeout);
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = wait_for_latest_osdmap(rbdc->client);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
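
/*
 * Example (sketch): for a default image with obj_order 22 and no fancy
 * striping, this yields object_size = 1U << 22 (4 MiB) with
 * stripe_unit == object_size and stripe_count == 1; only images with
 * RBD_FEATURE_STRIPINGV2 carry different stripe parameters.  With
 * RBD_FEATURE_DATA_POOL, data objects land in header.data_pool_id
 * rather than the pool the image header lives in.
 */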

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
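
/*
 * Example (sketch): for snapc->snaps = { 40, 30, 10 } (descending, as
 * the OSD keeps it), rbd_dev_snap_index() returns 1 for snap_id 30 and
 * BAD_SNAP_INDEX for snap_id 20 -- snapid_compare_reverse() inverts
 * the usual ordering so bsearch() works on the reversed array.
 */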

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		rbd_assert(0);
	}
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	img_request->obj_request_count++;
	img_request->pending_count++;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->ex.oe_objno, obj_request->ex.oe_off,
	     obj_request->ex.oe_len, osd_req);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
		return true;
	default:
		BUG();
	}
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);
	rbd_assert(osd_req == obj_req->osd_req);

	obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
	if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
		obj_req->xferred = osd_req->r_result;
	else
		/*
		 * Writes aren't allowed to return a data payload.  In some
		 * guarded write cases (e.g. stat + zero on an empty object)
		 * a stat response makes it through, but we don't care.
		 */
		obj_req->xferred = 0;

	rbd_obj_handle_request(obj_req);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}

static struct ceph_osd_request *
rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc,
			(rbd_img_is_write(img_req) ? img_req->snapc : NULL),
			num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		rbd_assert(0);
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
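
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * rbd_dev_parent_get()/_put() bracket any work that relies on the
 * parent image staying attached, as the layered-read and copyup paths
 * below do via rbd_img_request_create()/destroy().
 */
static __maybe_unused void rbd_parent_ref_example(struct rbd_device *rbd_dev)
{
	if (rbd_dev_parent_get(rbd_dev)) {
		/* ... safe to issue requests against rbd_dev->parent ... */
		rbd_dev_parent_put(rbd_dev);
	}
}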

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	spin_lock_init(&img_request->completion_lock);
	INIT_LIST_HEAD(&img_request->object_extents);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), img_request);
	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
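
/*
 * Example (sketch): with overlap = 100 and img_extents =
 * { {0, 50}, {80, 40}, {120, 10} }, the {120, 10} extent is dropped
 * entirely (fe_off >= overlap) and {80, 40} is trimmed to {80, 20},
 * leaving only extents that lie wholly within the parent overlap.
 */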

/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}

static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		rbd_assert(0);
	}
}

static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
{
	obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, 0);

	rbd_osd_req_format_read(obj_req);
	return 0;
}

static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
				unsigned int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}

static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
				  unsigned int which)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
				   rbd_dev->layout.object_size,
				   rbd_dev->layout.object_size);

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(obj_req->osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, which++);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (obj_req->num_img_extents) {
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		num_osd_ops = 3; /* stat + setallochint + write/writefull */
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		num_osd_ops = 2; /* setallochint + write/writefull */
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_write(obj_req, which);
	return 0;
}

static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
				    unsigned int which)
{
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else if (rbd_obj_is_tail(obj_req)) {
		opcode = CEPH_OSD_OP_TRUNCATE;
	} else {
		opcode = CEPH_OSD_OP_ZERO;
	}

	if (opcode)
		osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_is_entire(obj_req)) {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		if (obj_req->num_img_extents)
			num_osd_ops = 2; /* create + truncate */
		else
			num_osd_ops = 1; /* delete */
	} else {
		if (obj_req->num_img_extents) {
			obj_req->write_state = RBD_OBJ_WRITE_GUARD;
			num_osd_ops = 2; /* stat + truncate/zero */
		} else {
			obj_req->write_state = RBD_OBJ_WRITE_FLAT;
			num_osd_ops = 1; /* truncate/zero */
		}
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_discard(obj_req, which);
	return 0;
}
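
/*
 * Summary (sketch) of the OSD ops chosen above for a discard:
 *
 *	whole object, has parent:	create + truncate
 *	whole object, no parent:	delete
 *	tail of object:			[stat +] truncate
 *	middle of object:		[stat +] zero
 *
 * The stat op is the guard, present only when a parent overlap makes
 * copyup a possibility.
 */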

/*
 * For each object request in @img_req, allocate an OSD request, add
 * individual OSD ops and prepare them for submission.  The number of
 * OSD ops depends on op_type and the overlap point (if any).
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req;
	int ret;

	for_each_obj_request(img_req, obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_setup_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_setup_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_setup_discard(obj_req);
			break;
		default:
			rbd_assert(0);
		}
		if (ret)
			return ret;

		ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
		if (ret)
			return ret;
	}

	return 0;
}

union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};

static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}

/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}

static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy;
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}

static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}

static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}

static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}

static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				     struct ceph_file_extent *img_extents,
				     u32 num_img_extents,
				     struct ceph_bvec_iter *bvec_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BVECS,
		.pos = (union rbd_img_fill_iter *)bvec_pos,
		.set_pos_fn = set_bvec_pos,
		.count_fn = count_bvecs,
		.copy_fn = copy_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct bio_vec *bvecs)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
							     num_img_extents) },
	};

	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
					 &it);
}

static void rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;

	dout("%s: img %p\n", __func__, img_request);

	rbd_img_request_get(img_request);
	for_each_obj_request(img_request, obj_request)
		rbd_obj_request_submit(obj_request);

	rbd_img_request_put(img_request);
}
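
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * the normal data path for a block-layer read -- create an image
 * request, map the request's bio chain onto object requests and
 * submit.  The real caller is the blk-mq queue_rq handler later in
 * this file; this hypothetical helper omits its error reporting.
 */
static __maybe_unused void rbd_fill_from_bio_example(struct rbd_device *rbd_dev,
						     struct request *rq)
{
	struct rbd_img_request *img_req;

	img_req = rbd_img_request_create(rbd_dev, OBJ_OP_READ, NULL);
	if (!img_req)
		return;

	if (rbd_img_fill_from_bio(img_req, (u64)blk_rq_pos(rq) << SECTOR_SHIFT,
				  blk_rq_bytes(rq), rq->bio))
		rbd_img_request_put(img_req);
	else
		rbd_img_request_submit(img_req);
}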

static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
					       OBJ_OP_READ, NULL);
	if (!child_img_req)
		return -ENOMEM;

	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			rbd_assert(0);
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_put(child_img_req);
		return ret;
	}

	rbd_img_request_submit(child_img_req);
	return 0;
}

static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (obj_req->result == -ENOENT &&
	    rbd_dev->parent_overlap && !obj_req->tried_parent) {
		/* reverse map this object extent onto the parent */
		ret = rbd_obj_calc_img_extents(obj_req, false);
		if (ret) {
			obj_req->result = ret;
			return true;
		}

		if (obj_req->num_img_extents) {
			obj_req->tried_parent = true;
			ret = rbd_obj_read_from_parent(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
	}

	/*
	 * -ENOENT means a hole in the image -- zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  In both cases we update xferred
	 * count to indicate the whole request was satisfied.
	 */
	if (obj_req->result == -ENOENT ||
	    (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
		rbd_assert(!obj_req->xferred || !obj_req->result);
		rbd_obj_zero_range(obj_req, obj_req->xferred,
				   obj_req->ex.oe_len - obj_req->xferred);
		obj_req->result = 0;
		obj_req->xferred = obj_req->ex.oe_len;
	}

	return true;
}

/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}
2360 static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
2362 unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;
2365 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
2366 rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
2367 rbd_osd_req_destroy(obj_req->osd_req);
2370 * Create a copyup request with the same number of OSD ops as
2371 * the original request. The original request was stat + op(s),
2372 * the new copyup request will be copyup + the same op(s).
2374 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
2375 if (!obj_req->osd_req)
2378 ret = osd_req_op_cls_init(obj_req->osd_req, 0, "rbd", "copyup");
2383 * Only send non-zero copyup data to save some I/O and network
2384 * bandwidth -- zero copyup data is equivalent to the object not existing.
2387 if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
2388 dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
2391 osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
2392 obj_req->copyup_bvecs,
2393 obj_req->copyup_bvec_count,
2396 switch (obj_req->img_request->op_type) {
2398 __rbd_obj_setup_write(obj_req, 1);
2400 case OBJ_OP_DISCARD:
2401 rbd_assert(!rbd_obj_is_entire(obj_req));
2402 __rbd_obj_setup_discard(obj_req, 1);
2408 ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
2412 rbd_obj_request_submit(obj_req);
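/*
 * Illustration of the op layout described above: a guarded write
 * starts out as [stat, write]; after the stat returns -ENOENT and the
 * parent data has been read, the request is re-created here as
 * [call("rbd", "copyup"), write].  If the parent data is all zeroes,
 * the copyup payload is omitted and the copyup call is effectively a
 * no-op on the OSD.
 */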
2416 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
2420 rbd_assert(!obj_req->copyup_bvecs);
2421 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
2422 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
2423 sizeof(*obj_req->copyup_bvecs),
2425 if (!obj_req->copyup_bvecs)
2428 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
2429 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
2431 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
2432 if (!obj_req->copyup_bvecs[i].bv_page)
2435 obj_req->copyup_bvecs[i].bv_offset = 0;
2436 obj_req->copyup_bvecs[i].bv_len = len;
2440 rbd_assert(!obj_overlap);
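/*
 * Example of the allocation above (illustrative numbers): with 4 KiB
 * pages, an object overlap of 9000 bytes gives
 * calc_pages_for(0, 9000) == 3 bvecs of 4096, 4096 and 808 bytes; the
 * final rbd_assert() verifies the bvec lengths consume the overlap
 * exactly.
 */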
2444 static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
2446 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2449 rbd_assert(obj_req->num_img_extents);
2450 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2451 rbd_dev->parent_overlap);
2452 if (!obj_req->num_img_extents) {
2454 * The overlap has become 0 (most likely because the
2455 * image has been flattened). Use rbd_obj_issue_copyup()
2456 * to re-submit the original write request -- the copyup
2457 * operation itself will be a no-op, since someone must
2458 * have populated the child object while we weren't
2459 * looking. Move to WRITE_FLAT state as we'll be done
2460 * with the operation once the null copyup completes.
2462 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
2463 return rbd_obj_issue_copyup(obj_req, 0);
2466 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
2470 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
2471 return rbd_obj_read_from_parent(obj_req);
2474 static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
2479 switch (obj_req->write_state) {
2480 case RBD_OBJ_WRITE_GUARD:
2481 rbd_assert(!obj_req->xferred);
2482 if (obj_req->result == -ENOENT) {
2484 * The target object doesn't exist. Read the data for
2485 * the entire target object up to the overlap point (if
2486 * any) from the parent, so we can use it for a copyup.
2488 ret = rbd_obj_handle_write_guard(obj_req);
2490 obj_req->result = ret;
2496 case RBD_OBJ_WRITE_FLAT:
2497 if (!obj_req->result)
2499 * There is no such thing as a successful short
2500 * write -- indicate the whole request was satisfied.
2502 obj_req->xferred = obj_req->ex.oe_len;
2504 case RBD_OBJ_WRITE_COPYUP:
2505 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
2506 if (obj_req->result)
2509 rbd_assert(obj_req->xferred);
2510 ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
2512 obj_req->result = ret;
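/*
 * The write state machine implemented above, in short:
 * RBD_OBJ_WRITE_FLAT completes directly; RBD_OBJ_WRITE_GUARD turns an
 * -ENOENT into a parent read via rbd_obj_handle_write_guard(); once
 * that read finishes, RBD_OBJ_WRITE_COPYUP re-issues the write as a
 * copyup carrying the parent data (or an empty payload if the data
 * was all zeroes).
 */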
2522 * Returns true if @obj_req is completed, or false otherwise.
2524 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2526 switch (obj_req->img_request->op_type) {
2528 return rbd_obj_handle_read(obj_req);
2530 return rbd_obj_handle_write(obj_req);
2531 case OBJ_OP_DISCARD:
2532 if (rbd_obj_handle_write(obj_req)) {
2534 * Hide -ENOENT from delete/truncate/zero -- discarding
2535 * a non-existent object is not a problem.
2537 if (obj_req->result == -ENOENT) {
2538 obj_req->result = 0;
2539 obj_req->xferred = obj_req->ex.oe_len;
2549 static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
2551 struct rbd_img_request *img_req = obj_req->img_request;
2553 rbd_assert((!obj_req->result &&
2554 obj_req->xferred == obj_req->ex.oe_len) ||
2555 (obj_req->result < 0 && !obj_req->xferred));
2556 if (!obj_req->result) {
2557 img_req->xferred += obj_req->xferred;
2561 rbd_warn(img_req->rbd_dev,
2562 "%s at objno %llu %llu~%llu result %d xferred %llu",
2563 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
2564 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result,
2566 if (!img_req->result) {
2567 img_req->result = obj_req->result;
2568 img_req->xferred = 0;
2572 static void rbd_img_end_child_request(struct rbd_img_request *img_req)
2574 struct rbd_obj_request *obj_req = img_req->obj_request;
2576 rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
2577 rbd_assert((!img_req->result &&
2578 img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
2579 (img_req->result < 0 && !img_req->xferred));
2581 obj_req->result = img_req->result;
2582 obj_req->xferred = img_req->xferred;
2583 rbd_img_request_put(img_req);
2586 static void rbd_img_end_request(struct rbd_img_request *img_req)
2588 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
2589 rbd_assert((!img_req->result &&
2590 img_req->xferred == blk_rq_bytes(img_req->rq)) ||
2591 (img_req->result < 0 && !img_req->xferred));
2593 blk_mq_end_request(img_req->rq,
2594 errno_to_blk_status(img_req->result));
2595 rbd_img_request_put(img_req);
2598 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2600 struct rbd_img_request *img_req;
2603 if (!__rbd_obj_handle_request(obj_req))
2606 img_req = obj_req->img_request;
2607 spin_lock(&img_req->completion_lock);
2608 rbd_obj_end_request(obj_req);
2609 rbd_assert(img_req->pending_count);
2610 if (--img_req->pending_count) {
2611 spin_unlock(&img_req->completion_lock);
2615 spin_unlock(&img_req->completion_lock);
2616 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
2617 obj_req = img_req->obj_request;
2618 rbd_img_end_child_request(img_req);
2621 rbd_img_end_request(img_req);
2624 static const struct rbd_client_id rbd_empty_cid;
2626 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
2627 const struct rbd_client_id *rhs)
2629 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
2632 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
2634 struct rbd_client_id cid;
2636 mutex_lock(&rbd_dev->watch_mutex);
2637 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
2638 cid.handle = rbd_dev->watch_cookie;
2639 mutex_unlock(&rbd_dev->watch_mutex);
2644 * lock_rwsem must be held for write
2646 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
2647 const struct rbd_client_id *cid)
2649 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
2650 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
2651 cid->gid, cid->handle);
2652 rbd_dev->owner_cid = *cid; /* struct */
2655 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
2657 mutex_lock(&rbd_dev->watch_mutex);
2658 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
2659 mutex_unlock(&rbd_dev->watch_mutex);
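/*
 * Illustrative cookie value: with the sprintf() format above, a
 * watch_cookie of 94223 yields "<RBD_LOCK_COOKIE_PREFIX> 94223"
 * (e.g. "auto 94223" if the prefix is "auto").  find_watcher() parses
 * the same format back with sscanf().
 */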
2662 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
2664 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2666 strcpy(rbd_dev->lock_cookie, cookie);
2667 rbd_set_owner_cid(rbd_dev, &cid);
2668 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
2672 * lock_rwsem must be held for write
2674 static int rbd_lock(struct rbd_device *rbd_dev)
2676 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2680 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
2681 rbd_dev->lock_cookie[0] != '\0');
2683 format_lock_cookie(rbd_dev, cookie);
2684 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2685 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
2686 RBD_LOCK_TAG, "", 0);
2690 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
2691 __rbd_lock(rbd_dev, cookie);
2696 * lock_rwsem must be held for write
2698 static void rbd_unlock(struct rbd_device *rbd_dev)
2700 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2703 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
2704 rbd_dev->lock_cookie[0] == '\0');
2706 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2707 RBD_LOCK_NAME, rbd_dev->lock_cookie);
2708 if (ret && ret != -ENOENT)
2709 rbd_warn(rbd_dev, "failed to unlock: %d", ret);
2711 /* treat errors as the image is unlocked */
2712 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
2713 rbd_dev->lock_cookie[0] = '\0';
2714 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
2715 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
2718 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
2719 enum rbd_notify_op notify_op,
2720 struct page ***preply_pages,
2723 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2724 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2725 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
2726 int buf_size = sizeof(buf);
2729 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
2731 /* encode *LockPayload NotifyMessage (op + ClientId) */
2732 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
2733 ceph_encode_32(&p, notify_op);
2734 ceph_encode_64(&p, cid.gid);
2735 ceph_encode_64(&p, cid.handle);
2737 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
2738 &rbd_dev->header_oloc, buf, buf_size,
2739 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
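/*
 * Resulting wire layout, little-endian, assuming the usual 6-byte
 * CEPH_ENCODING_START_BLK_LEN (struct_v, struct_compat, struct_len):
 *
 *	u8   struct_v = 2
 *	u8   struct_compat = 1
 *	le32 struct_len = 20
 *	le32 notify_op
 *	le64 cid.gid
 *	le64 cid.handle
 *
 * exactly the 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN bytes reserved
 * for buf above.
 */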
2742 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
2743 enum rbd_notify_op notify_op)
2745 struct page **reply_pages;
2748 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
2749 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2752 static void rbd_notify_acquired_lock(struct work_struct *work)
2754 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2755 acquired_lock_work);
2757 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
2760 static void rbd_notify_released_lock(struct work_struct *work)
2762 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2763 released_lock_work);
2765 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
2768 static int rbd_request_lock(struct rbd_device *rbd_dev)
2770 struct page **reply_pages;
2772 bool lock_owner_responded = false;
2775 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2777 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
2778 &reply_pages, &reply_len);
2779 if (ret && ret != -ETIMEDOUT) {
2780 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
2784 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
2785 void *p = page_address(reply_pages[0]);
2786 void *const end = p + reply_len;
2789 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
2794 ceph_decode_need(&p, end, 8 + 8, e_inval);
2795 p += 8 + 8; /* skip gid and cookie */
2797 ceph_decode_32_safe(&p, end, len, e_inval);
2801 if (lock_owner_responded) {
2803 "duplicate lock owners detected");
2808 lock_owner_responded = true;
2809 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
2813 "failed to decode ResponseMessage: %d",
2818 ret = ceph_decode_32(&p);
2822 if (!lock_owner_responded) {
2823 rbd_warn(rbd_dev, "no lock owners detected");
2828 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2836 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
2838 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
2840 cancel_delayed_work(&rbd_dev->lock_dwork);
2842 wake_up_all(&rbd_dev->lock_waitq);
2844 wake_up(&rbd_dev->lock_waitq);
2847 static int get_lock_owner_info(struct rbd_device *rbd_dev,
2848 struct ceph_locker **lockers, u32 *num_lockers)
2850 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2855 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2857 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
2858 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2859 &lock_type, &lock_tag, lockers, num_lockers);
2863 if (*num_lockers == 0) {
2864 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
2868 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
2869 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
2875 if (lock_type == CEPH_CLS_LOCK_SHARED) {
2876 rbd_warn(rbd_dev, "shared lock type detected");
2881 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
2882 strlen(RBD_LOCK_COOKIE_PREFIX))) {
2883 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
2884 (*lockers)[0].id.cookie);
2894 static int find_watcher(struct rbd_device *rbd_dev,
2895 const struct ceph_locker *locker)
2897 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2898 struct ceph_watch_item *watchers;
2904 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
2905 &rbd_dev->header_oloc, &watchers,
2910 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
2911 for (i = 0; i < num_watchers; i++) {
2912 if (!memcmp(&watchers[i].addr, &locker->info.addr,
2913 sizeof(locker->info.addr)) &&
2914 watchers[i].cookie == cookie) {
2915 struct rbd_client_id cid = {
2916 .gid = le64_to_cpu(watchers[i].name.num),
2920 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
2921 rbd_dev, cid.gid, cid.handle);
2922 rbd_set_owner_cid(rbd_dev, &cid);
2928 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
2936 * lock_rwsem must be held for write
2938 static int rbd_try_lock(struct rbd_device *rbd_dev)
2940 struct ceph_client *client = rbd_dev->rbd_client->client;
2941 struct ceph_locker *lockers;
2946 ret = rbd_lock(rbd_dev);
2950 /* determine if the current lock holder is still alive */
2951 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
2955 if (num_lockers == 0)
2958 ret = find_watcher(rbd_dev, lockers);
2961 ret = 0; /* have to request lock */
2965 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
2966 ENTITY_NAME(lockers[0].id.name));
2968 ret = ceph_monc_blacklist_add(&client->monc,
2969 &lockers[0].info.addr);
2971 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
2972 ENTITY_NAME(lockers[0].id.name), ret);
2976 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
2977 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2978 lockers[0].id.cookie,
2979 &lockers[0].id.name);
2980 if (ret && ret != -ENOENT)
2984 ceph_free_lockers(lockers, num_lockers);
2988 ceph_free_lockers(lockers, num_lockers);
2993 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
2995 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
2998 enum rbd_lock_state lock_state;
3000 down_read(&rbd_dev->lock_rwsem);
3001 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3002 rbd_dev->lock_state);
3003 if (__rbd_is_lock_owner(rbd_dev)) {
3004 lock_state = rbd_dev->lock_state;
3005 up_read(&rbd_dev->lock_rwsem);
3009 up_read(&rbd_dev->lock_rwsem);
3010 down_write(&rbd_dev->lock_rwsem);
3011 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3012 rbd_dev->lock_state);
3013 if (!__rbd_is_lock_owner(rbd_dev)) {
3014 *pret = rbd_try_lock(rbd_dev);
3016 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3019 lock_state = rbd_dev->lock_state;
3020 up_write(&rbd_dev->lock_rwsem);
3024 static void rbd_acquire_lock(struct work_struct *work)
3026 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3027 struct rbd_device, lock_dwork);
3028 enum rbd_lock_state lock_state;
3031 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3033 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3034 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3035 if (lock_state == RBD_LOCK_STATE_LOCKED)
3036 wake_requests(rbd_dev, true);
3037 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3038 rbd_dev, lock_state, ret);
3042 ret = rbd_request_lock(rbd_dev);
3043 if (ret == -ETIMEDOUT) {
3044 goto again; /* treat this as a dead client */
3045 } else if (ret == -EROFS) {
3046 rbd_warn(rbd_dev, "peer will not release lock");
3048 * If this is rbd_add_acquire_lock(), we want to fail
3049 * immediately -- reuse BLACKLISTED flag. Otherwise we want to block.
3052 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
3053 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3054 /* wake "rbd map --exclusive" process */
3055 wake_requests(rbd_dev, false);
3057 } else if (ret < 0) {
3058 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3059 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3063 * lock owner acked, but resend if we don't see them release the lock
3066 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3068 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3069 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3074 * lock_rwsem must be held for write
3076 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3078 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3079 rbd_dev->lock_state);
3080 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3083 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3084 downgrade_write(&rbd_dev->lock_rwsem);
3086 * Ensure that all in-flight IO is flushed.
3088 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3089 * may be shared with other devices.
3091 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3092 up_read(&rbd_dev->lock_rwsem);
3094 down_write(&rbd_dev->lock_rwsem);
3095 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3096 rbd_dev->lock_state);
3097 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3100 rbd_unlock(rbd_dev);
3102 * Give others a chance to grab the lock - we would re-acquire
3103 * almost immediately if we got new IO during ceph_osdc_sync()
3104 * otherwise. We need to ack our own notifications, so this
3105 * lock_dwork will be requeued from rbd_wait_state_locked()
3106 * after wake_requests() in rbd_handle_released_lock().
3108 cancel_delayed_work(&rbd_dev->lock_dwork);
3112 static void rbd_release_lock_work(struct work_struct *work)
3114 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3117 down_write(&rbd_dev->lock_rwsem);
3118 rbd_release_lock(rbd_dev);
3119 up_write(&rbd_dev->lock_rwsem);
3122 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3125 struct rbd_client_id cid = { 0 };
3127 if (struct_v >= 2) {
3128 cid.gid = ceph_decode_64(p);
3129 cid.handle = ceph_decode_64(p);
3132 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3134 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3135 down_write(&rbd_dev->lock_rwsem);
3136 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3138 * we already know that the remote client is the owner
3141 up_write(&rbd_dev->lock_rwsem);
3145 rbd_set_owner_cid(rbd_dev, &cid);
3146 downgrade_write(&rbd_dev->lock_rwsem);
3148 down_read(&rbd_dev->lock_rwsem);
3151 if (!__rbd_is_lock_owner(rbd_dev))
3152 wake_requests(rbd_dev, false);
3153 up_read(&rbd_dev->lock_rwsem);
3156 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3159 struct rbd_client_id cid = { 0 };
3161 if (struct_v >= 2) {
3162 cid.gid = ceph_decode_64(p);
3163 cid.handle = ceph_decode_64(p);
3166 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3168 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3169 down_write(&rbd_dev->lock_rwsem);
3170 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3171 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3172 __func__, rbd_dev, cid.gid, cid.handle,
3173 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3174 up_write(&rbd_dev->lock_rwsem);
3178 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3179 downgrade_write(&rbd_dev->lock_rwsem);
3181 down_read(&rbd_dev->lock_rwsem);
3184 if (!__rbd_is_lock_owner(rbd_dev))
3185 wake_requests(rbd_dev, false);
3186 up_read(&rbd_dev->lock_rwsem);
3190 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
3191 * ResponseMessage is needed.
3193 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3196 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3197 struct rbd_client_id cid = { 0 };
3200 if (struct_v >= 2) {
3201 cid.gid = ceph_decode_64(p);
3202 cid.handle = ceph_decode_64(p);
3205 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3207 if (rbd_cid_equal(&cid, &my_cid))
3210 down_read(&rbd_dev->lock_rwsem);
3211 if (__rbd_is_lock_owner(rbd_dev)) {
3212 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
3213 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
3217 * encode ResponseMessage(0) so the peer can detect a missing owner
3222 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3223 if (!rbd_dev->opts->exclusive) {
3224 dout("%s rbd_dev %p queueing unlock_work\n",
3226 queue_work(rbd_dev->task_wq,
3227 &rbd_dev->unlock_work);
3229 /* refuse to release the lock */
3236 up_read(&rbd_dev->lock_rwsem);
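/*
 * Tying the pieces together: the result computed here is encoded into
 * a ResponseMessage by rbd_watch_cb() via
 * rbd_acknowledge_notify_result(), decoded by the requesting client in
 * rbd_request_lock(), where the refuse-to-release case surfaces as
 * -EROFS ("peer will not release lock" in rbd_acquire_lock()).
 */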
3240 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3241 u64 notify_id, u64 cookie, s32 *result)
3243 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3244 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
3245 int buf_size = sizeof(buf);
3251 /* encode ResponseMessage */
3252 ceph_start_encoding(&p, 1, 1,
3253 buf_size - CEPH_ENCODING_START_BLK_LEN);
3254 ceph_encode_32(&p, *result);
3259 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3260 &rbd_dev->header_oloc, notify_id, cookie,
3263 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3266 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3269 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3270 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3273 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3274 u64 notify_id, u64 cookie, s32 result)
3276 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3277 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3280 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3281 u64 notifier_id, void *data, size_t data_len)
3283 struct rbd_device *rbd_dev = arg;
3285 void *const end = p + data_len;
3291 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3292 __func__, rbd_dev, cookie, notify_id, data_len);
3294 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3297 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3302 notify_op = ceph_decode_32(&p);
3304 /* legacy notification for header updates */
3305 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3309 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3310 switch (notify_op) {
3311 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3312 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3313 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3315 case RBD_NOTIFY_OP_RELEASED_LOCK:
3316 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3317 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3319 case RBD_NOTIFY_OP_REQUEST_LOCK:
3320 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
3322 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3325 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3327 case RBD_NOTIFY_OP_HEADER_UPDATE:
3328 ret = rbd_dev_refresh(rbd_dev);
3330 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3332 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3335 if (rbd_is_lock_owner(rbd_dev))
3336 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3337 cookie, -EOPNOTSUPP);
3339 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3344 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3346 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3348 struct rbd_device *rbd_dev = arg;
3350 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3352 down_write(&rbd_dev->lock_rwsem);
3353 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3354 up_write(&rbd_dev->lock_rwsem);
3356 mutex_lock(&rbd_dev->watch_mutex);
3357 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3358 __rbd_unregister_watch(rbd_dev);
3359 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3361 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3363 mutex_unlock(&rbd_dev->watch_mutex);
3367 * watch_mutex must be locked
3369 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3371 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3372 struct ceph_osd_linger_request *handle;
3374 rbd_assert(!rbd_dev->watch_handle);
3375 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3377 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3378 &rbd_dev->header_oloc, rbd_watch_cb,
3379 rbd_watch_errcb, rbd_dev);
3381 return PTR_ERR(handle);
3383 rbd_dev->watch_handle = handle;
3388 * watch_mutex must be locked
3390 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3392 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3395 rbd_assert(rbd_dev->watch_handle);
3396 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3398 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3400 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3402 rbd_dev->watch_handle = NULL;
3405 static int rbd_register_watch(struct rbd_device *rbd_dev)
3409 mutex_lock(&rbd_dev->watch_mutex);
3410 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3411 ret = __rbd_register_watch(rbd_dev);
3415 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3416 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3419 mutex_unlock(&rbd_dev->watch_mutex);
3423 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3425 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3427 cancel_work_sync(&rbd_dev->acquired_lock_work);
3428 cancel_work_sync(&rbd_dev->released_lock_work);
3429 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3430 cancel_work_sync(&rbd_dev->unlock_work);
3433 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3435 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3436 cancel_tasks_sync(rbd_dev);
3438 mutex_lock(&rbd_dev->watch_mutex);
3439 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3440 __rbd_unregister_watch(rbd_dev);
3441 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3442 mutex_unlock(&rbd_dev->watch_mutex);
3444 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3445 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3449 * lock_rwsem must be held for write
3451 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3453 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3457 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3459 format_lock_cookie(rbd_dev, cookie);
3460 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
3461 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3462 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
3463 RBD_LOCK_TAG, cookie);
3465 if (ret != -EOPNOTSUPP)
3466 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
3470 * Lock cookie cannot be updated on older OSDs, so do
3471 * a manual release and queue an acquire.
3473 if (rbd_release_lock(rbd_dev))
3474 queue_delayed_work(rbd_dev->task_wq,
3475 &rbd_dev->lock_dwork, 0);
3477 __rbd_lock(rbd_dev, cookie);
3481 static void rbd_reregister_watch(struct work_struct *work)
3483 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3484 struct rbd_device, watch_dwork);
3487 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3489 mutex_lock(&rbd_dev->watch_mutex);
3490 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
3491 mutex_unlock(&rbd_dev->watch_mutex);
3495 ret = __rbd_register_watch(rbd_dev);
3497 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3498 if (ret == -EBLACKLISTED || ret == -ENOENT) {
3499 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3500 wake_requests(rbd_dev, true);
3502 queue_delayed_work(rbd_dev->task_wq,
3503 &rbd_dev->watch_dwork,
3506 mutex_unlock(&rbd_dev->watch_mutex);
3510 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3511 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3512 mutex_unlock(&rbd_dev->watch_mutex);
3514 down_write(&rbd_dev->lock_rwsem);
3515 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3516 rbd_reacquire_lock(rbd_dev);
3517 up_write(&rbd_dev->lock_rwsem);
3519 ret = rbd_dev_refresh(rbd_dev);
3521 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3525 * Synchronous osd object method call. Returns the number of bytes
3526 * returned in the inbound buffer, or a negative error code.
3528 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3529 struct ceph_object_id *oid,
3530 struct ceph_object_locator *oloc,
3531 const char *method_name,
3532 const void *outbound,
3533 size_t outbound_size,
3535 size_t inbound_size)
3537 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3538 struct page *req_page = NULL;
3539 struct page *reply_page;
3543 * Method calls are ultimately read operations. The result
3544 * should be placed into the inbound buffer provided. They
3545 * also supply outbound data--parameters for the object
3546 * method. Currently if this is present it will be a snapshot id.
3550 if (outbound_size > PAGE_SIZE)
3553 req_page = alloc_page(GFP_KERNEL);
3557 memcpy(page_address(req_page), outbound, outbound_size);
3560 reply_page = alloc_page(GFP_KERNEL);
3563 __free_page(req_page);
3567 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
3568 CEPH_OSD_FLAG_READ, req_page, outbound_size,
3569 reply_page, &inbound_size);
3571 memcpy(inbound, page_address(reply_page), inbound_size);
3576 __free_page(req_page);
3577 __free_page(reply_page);
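/*
 * Typical call (see _rbd_dev_v2_snap_size() below for the real thing):
 * a "get_size" method call passes the snapshot id as outbound data and
 * receives a packed { u8 order; le64 size; } reply in the inbound
 * buffer, with the return value giving the reply length.
 */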
3582 * lock_rwsem must be held for read
3584 static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
3587 unsigned long timeout;
3590 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
3591 return -EBLACKLISTED;
3593 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3597 rbd_warn(rbd_dev, "exclusive lock required");
3603 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3604 * and cancel_delayed_work() in wake_requests().
3606 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3607 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3608 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
3609 TASK_UNINTERRUPTIBLE);
3610 up_read(&rbd_dev->lock_rwsem);
3611 timeout = schedule_timeout(ceph_timeout_jiffies(
3612 rbd_dev->opts->lock_timeout));
3613 down_read(&rbd_dev->lock_rwsem);
3614 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
3615 ret = -EBLACKLISTED;
3619 rbd_warn(rbd_dev, "timed out waiting for lock");
3623 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3625 finish_wait(&rbd_dev->lock_waitq, &wait);
3629 static void rbd_queue_workfn(struct work_struct *work)
3631 struct request *rq = blk_mq_rq_from_pdu(work);
3632 struct rbd_device *rbd_dev = rq->q->queuedata;
3633 struct rbd_img_request *img_request;
3634 struct ceph_snap_context *snapc = NULL;
3635 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3636 u64 length = blk_rq_bytes(rq);
3637 enum obj_operation_type op_type;
3639 bool must_be_locked;
3642 switch (req_op(rq)) {
3643 case REQ_OP_DISCARD:
3644 case REQ_OP_WRITE_ZEROES:
3645 op_type = OBJ_OP_DISCARD;
3648 op_type = OBJ_OP_WRITE;
3651 op_type = OBJ_OP_READ;
3654 dout("%s: non-fs request type %d\n", __func__, req_op(rq));
3659 /* Ignore/skip any zero-length requests */
3662 dout("%s: zero-length request\n", __func__);
3667 rbd_assert(op_type == OBJ_OP_READ ||
3668 rbd_dev->spec->snap_id == CEPH_NOSNAP);
3671 * Quit early if the mapped snapshot no longer exists. It's
3672 * still possible the snapshot will have disappeared by the
3673 * time our request arrives at the osd, but there's no sense in
3674 * sending it if we already know.
3676 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3677 dout("request for non-existent snapshot");
3678 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3683 if (offset && length > U64_MAX - offset + 1) {
3684 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3687 goto err_rq; /* Shouldn't happen */
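/*
 * Worked example of the wraparound check above (illustrative): with
 * offset == U64_MAX - 4095, the largest legal length is
 * U64_MAX - offset + 1 == 4096; a length of 8192 would wrap
 * offset + length past zero and is rejected here.
 */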
3690 blk_mq_start_request(rq);
3692 down_read(&rbd_dev->header_rwsem);
3693 mapping_size = rbd_dev->mapping.size;
3694 if (op_type != OBJ_OP_READ) {
3695 snapc = rbd_dev->header.snapc;
3696 ceph_get_snap_context(snapc);
3698 up_read(&rbd_dev->header_rwsem);
3700 if (offset + length > mapping_size) {
3701 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3702 length, mapping_size);
3708 (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
3709 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
3710 if (must_be_locked) {
3711 down_read(&rbd_dev->lock_rwsem);
3712 result = rbd_wait_state_locked(rbd_dev,
3713 !rbd_dev->opts->exclusive);
3718 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
3723 img_request->rq = rq;
3724 snapc = NULL; /* img_request consumes a ref */
3726 if (op_type == OBJ_OP_DISCARD)
3727 result = rbd_img_fill_nodata(img_request, offset, length);
3729 result = rbd_img_fill_from_bio(img_request, offset, length,
3732 goto err_img_request;
3734 rbd_img_request_submit(img_request);
3736 up_read(&rbd_dev->lock_rwsem);
3740 rbd_img_request_put(img_request);
3743 up_read(&rbd_dev->lock_rwsem);
3746 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3747 obj_op_name(op_type), length, offset, result);
3748 ceph_put_snap_context(snapc);
3750 blk_mq_end_request(rq, errno_to_blk_status(result));
3753 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3754 const struct blk_mq_queue_data *bd)
3756 struct request *rq = bd->rq;
3757 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3759 queue_work(rbd_wq, work);
3763 static void rbd_free_disk(struct rbd_device *rbd_dev)
3765 blk_cleanup_queue(rbd_dev->disk->queue);
3766 blk_mq_free_tag_set(&rbd_dev->tag_set);
3767 put_disk(rbd_dev->disk);
3768 rbd_dev->disk = NULL;
3771 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3772 struct ceph_object_id *oid,
3773 struct ceph_object_locator *oloc,
3774 void *buf, int buf_len)
3777 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3778 struct ceph_osd_request *req;
3779 struct page **pages;
3780 int num_pages = calc_pages_for(0, buf_len);
3783 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
3787 ceph_oid_copy(&req->r_base_oid, oid);
3788 ceph_oloc_copy(&req->r_base_oloc, oloc);
3789 req->r_flags = CEPH_OSD_FLAG_READ;
3791 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
3792 if (IS_ERR(pages)) {
3793 ret = PTR_ERR(pages);
3797 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
3798 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
3801 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
3805 ceph_osdc_start_request(osdc, req, false);
3806 ret = ceph_osdc_wait_request(osdc, req);
3808 ceph_copy_from_page_vector(pages, buf, 0, ret);
3811 ceph_osdc_put_request(req);
3816 * Read the complete header for the given rbd device. On successful
3817 * return, the rbd_dev->header field will contain up-to-date
3818 * information about the image.
3820 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3822 struct rbd_image_header_ondisk *ondisk = NULL;
3829 * The complete header will include an array of its 64-bit
3830 * snapshot ids, followed by the names of those snapshots as
3831 * a contiguous block of NUL-terminated strings. Note that
3832 * the number of snapshots could change by the time we read
3833 * it in, in which case we re-read it.
3840 size = sizeof (*ondisk);
3841 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3843 ondisk = kmalloc(size, GFP_KERNEL);
3847 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
3848 &rbd_dev->header_oloc, ondisk, size);
3851 if ((size_t)ret < size) {
3853 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3857 if (!rbd_dev_ondisk_valid(ondisk)) {
3859 rbd_warn(rbd_dev, "invalid header");
3863 names_size = le64_to_cpu(ondisk->snap_names_len);
3864 want_count = snap_count;
3865 snap_count = le32_to_cpu(ondisk->snap_count);
3866 } while (snap_count != want_count);
3868 ret = rbd_header_from_disk(rbd_dev, ondisk);
3876 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3877 * has disappeared from the (just updated) snapshot context.
3879 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3883 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3886 snap_id = rbd_dev->spec->snap_id;
3887 if (snap_id == CEPH_NOSNAP)
3890 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3891 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3894 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3899 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3900 * try to update its size. If REMOVING is set, updating size
3901 * is just useless work since the device can't be opened.
3903 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3904 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
3905 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3906 dout("setting size to %llu sectors", (unsigned long long)size);
3907 set_capacity(rbd_dev->disk, size);
3908 revalidate_disk(rbd_dev->disk);
3912 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3917 down_write(&rbd_dev->header_rwsem);
3918 mapping_size = rbd_dev->mapping.size;
3920 ret = rbd_dev_header_info(rbd_dev);
3925 * If there is a parent, see if it has disappeared due to the
3926 * mapped image getting flattened.
3928 if (rbd_dev->parent) {
3929 ret = rbd_dev_v2_parent_info(rbd_dev);
3934 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3935 rbd_dev->mapping.size = rbd_dev->header.image_size;
3937 /* validate mapped snapshot's EXISTS flag */
3938 rbd_exists_validate(rbd_dev);
3942 up_write(&rbd_dev->header_rwsem);
3943 if (!ret && mapping_size != rbd_dev->mapping.size)
3944 rbd_dev_update_size(rbd_dev);
3949 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
3950 unsigned int hctx_idx, unsigned int numa_node)
3952 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3954 INIT_WORK(work, rbd_queue_workfn);
3958 static const struct blk_mq_ops rbd_mq_ops = {
3959 .queue_rq = rbd_queue_rq,
3960 .init_request = rbd_init_request,
3963 static int rbd_init_disk(struct rbd_device *rbd_dev)
3965 struct gendisk *disk;
3966 struct request_queue *q;
3967 unsigned int objset_bytes =
3968 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
3971 /* create gendisk info */
3972 disk = alloc_disk(single_major ?
3973 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3974 RBD_MINORS_PER_MAJOR);
3978 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3980 disk->major = rbd_dev->major;
3981 disk->first_minor = rbd_dev->minor;
3983 disk->flags |= GENHD_FL_EXT_DEVT;
3984 disk->fops = &rbd_bd_ops;
3985 disk->private_data = rbd_dev;
3987 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3988 rbd_dev->tag_set.ops = &rbd_mq_ops;
3989 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
3990 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3991 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
3992 rbd_dev->tag_set.nr_hw_queues = 1;
3993 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3995 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3999 q = blk_mq_init_queue(&rbd_dev->tag_set);
4005 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
4006 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4008 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
4009 q->limits.max_sectors = queue_max_hw_sectors(q);
4010 blk_queue_max_segments(q, USHRT_MAX);
4011 blk_queue_max_segment_size(q, UINT_MAX);
4012 blk_queue_io_min(q, objset_bytes);
4013 blk_queue_io_opt(q, objset_bytes);
4015 if (rbd_dev->opts->trim) {
4016 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
4017 q->limits.discard_granularity = objset_bytes;
4018 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4019 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
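/*
 * Illustrative sizing: with 4 MiB objects (the usual rbd default) and
 * stripe_count 1, objset_bytes is 4 MiB, so io_min/io_opt and the
 * discard granularity are 4 MiB and max_hw_sectors is 8192 512-byte
 * sectors.
 */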
4022 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4023 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
4026 * disk_release() expects a queue ref from add_disk() and will
4027 * put it. Hold an extra ref until add_disk() is called.
4029 WARN_ON(!blk_get_queue(q));
4031 q->queuedata = rbd_dev;
4033 rbd_dev->disk = disk;
4037 blk_mq_free_tag_set(&rbd_dev->tag_set);
4047 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4049 return container_of(dev, struct rbd_device, dev);
4052 static ssize_t rbd_size_show(struct device *dev,
4053 struct device_attribute *attr, char *buf)
4055 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4057 return sprintf(buf, "%llu\n",
4058 (unsigned long long)rbd_dev->mapping.size);
4062 * Note this shows the features for whatever's mapped, which is not
4063 * necessarily the base image.
4065 static ssize_t rbd_features_show(struct device *dev,
4066 struct device_attribute *attr, char *buf)
4068 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4070 return sprintf(buf, "0x%016llx\n",
4071 (unsigned long long)rbd_dev->mapping.features);
4074 static ssize_t rbd_major_show(struct device *dev,
4075 struct device_attribute *attr, char *buf)
4077 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4080 return sprintf(buf, "%d\n", rbd_dev->major);
4082 return sprintf(buf, "(none)\n");
4085 static ssize_t rbd_minor_show(struct device *dev,
4086 struct device_attribute *attr, char *buf)
4088 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4090 return sprintf(buf, "%d\n", rbd_dev->minor);
4093 static ssize_t rbd_client_addr_show(struct device *dev,
4094 struct device_attribute *attr, char *buf)
4096 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4097 struct ceph_entity_addr *client_addr =
4098 ceph_client_addr(rbd_dev->rbd_client->client);
4100 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
4101 le32_to_cpu(client_addr->nonce));
4104 static ssize_t rbd_client_id_show(struct device *dev,
4105 struct device_attribute *attr, char *buf)
4107 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4109 return sprintf(buf, "client%lld\n",
4110 ceph_client_gid(rbd_dev->rbd_client->client));
4113 static ssize_t rbd_cluster_fsid_show(struct device *dev,
4114 struct device_attribute *attr, char *buf)
4116 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4118 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
4121 static ssize_t rbd_config_info_show(struct device *dev,
4122 struct device_attribute *attr, char *buf)
4124 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4126 return sprintf(buf, "%s\n", rbd_dev->config_info);
4129 static ssize_t rbd_pool_show(struct device *dev,
4130 struct device_attribute *attr, char *buf)
4132 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4134 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
4137 static ssize_t rbd_pool_id_show(struct device *dev,
4138 struct device_attribute *attr, char *buf)
4140 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4142 return sprintf(buf, "%llu\n",
4143 (unsigned long long) rbd_dev->spec->pool_id);
4146 static ssize_t rbd_pool_ns_show(struct device *dev,
4147 struct device_attribute *attr, char *buf)
4149 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4151 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
4154 static ssize_t rbd_name_show(struct device *dev,
4155 struct device_attribute *attr, char *buf)
4157 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4159 if (rbd_dev->spec->image_name)
4160 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
4162 return sprintf(buf, "(unknown)\n");
4165 static ssize_t rbd_image_id_show(struct device *dev,
4166 struct device_attribute *attr, char *buf)
4168 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4170 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
4174 * Shows the name of the currently-mapped snapshot (or
4175 * RBD_SNAP_HEAD_NAME for the base image).
4177 static ssize_t rbd_snap_show(struct device *dev,
4178 struct device_attribute *attr,
4181 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4183 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
4186 static ssize_t rbd_snap_id_show(struct device *dev,
4187 struct device_attribute *attr, char *buf)
4189 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4191 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
4195 * For a v2 image, shows the chain of parent images, separated by empty
4196 * lines. For v1 images or if there is no parent, shows "(no parent image)".
4199 static ssize_t rbd_parent_show(struct device *dev,
4200 struct device_attribute *attr,
4203 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4206 if (!rbd_dev->parent)
4207 return sprintf(buf, "(no parent image)\n");
4209 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
4210 struct rbd_spec *spec = rbd_dev->parent_spec;
4212 count += sprintf(&buf[count], "%s"
4213 "pool_id %llu\npool_name %s\n"
4215 "image_id %s\nimage_name %s\n"
4216 "snap_id %llu\nsnap_name %s\n"
4218 !count ? "" : "\n", /* first? */
4219 spec->pool_id, spec->pool_name,
4220 spec->pool_ns ?: "",
4221 spec->image_id, spec->image_name ?: "(unknown)",
4222 spec->snap_id, spec->snap_name,
4223 rbd_dev->parent_overlap);
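/*
 * Example output (illustrative values only):
 *
 *	pool_id 2
 *	pool_name rbd
 *	pool_ns
 *	image_id 1018e46ad9f7
 *	image_name parent-image
 *	snap_id 4
 *	snap_name snap1
 *	overlap 10737418240
 */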
4229 static ssize_t rbd_image_refresh(struct device *dev,
4230 struct device_attribute *attr,
4234 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4237 ret = rbd_dev_refresh(rbd_dev);
4244 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
4245 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
4246 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
4247 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
4248 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
4249 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
4250 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
4251 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
4252 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
4253 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
4254 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
4255 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
4256 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
4257 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
4258 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
4259 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
4260 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
4262 static struct attribute *rbd_attrs[] = {
4263 &dev_attr_size.attr,
4264 &dev_attr_features.attr,
4265 &dev_attr_major.attr,
4266 &dev_attr_minor.attr,
4267 &dev_attr_client_addr.attr,
4268 &dev_attr_client_id.attr,
4269 &dev_attr_cluster_fsid.attr,
4270 &dev_attr_config_info.attr,
4271 &dev_attr_pool.attr,
4272 &dev_attr_pool_id.attr,
4273 &dev_attr_pool_ns.attr,
4274 &dev_attr_name.attr,
4275 &dev_attr_image_id.attr,
4276 &dev_attr_current_snap.attr,
4277 &dev_attr_snap_id.attr,
4278 &dev_attr_parent.attr,
4279 &dev_attr_refresh.attr,
4283 static struct attribute_group rbd_attr_group = {
4287 static const struct attribute_group *rbd_attr_groups[] = {
4292 static void rbd_dev_release(struct device *dev);
4294 static const struct device_type rbd_device_type = {
4296 .groups = rbd_attr_groups,
4297 .release = rbd_dev_release,
4300 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4302 kref_get(&spec->kref);
4307 static void rbd_spec_free(struct kref *kref);
4308 static void rbd_spec_put(struct rbd_spec *spec)
4311 kref_put(&spec->kref, rbd_spec_free);
4314 static struct rbd_spec *rbd_spec_alloc(void)
4316 struct rbd_spec *spec;
4318 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4322 spec->pool_id = CEPH_NOPOOL;
4323 spec->snap_id = CEPH_NOSNAP;
4324 kref_init(&spec->kref);
4329 static void rbd_spec_free(struct kref *kref)
4331 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4333 kfree(spec->pool_name);
4334 kfree(spec->pool_ns);
4335 kfree(spec->image_id);
4336 kfree(spec->image_name);
4337 kfree(spec->snap_name);
4341 static void rbd_dev_free(struct rbd_device *rbd_dev)
4343 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
4344 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
4346 ceph_oid_destroy(&rbd_dev->header_oid);
4347 ceph_oloc_destroy(&rbd_dev->header_oloc);
4348 kfree(rbd_dev->config_info);
4350 rbd_put_client(rbd_dev->rbd_client);
4351 rbd_spec_put(rbd_dev->spec);
4352 kfree(rbd_dev->opts);
4356 static void rbd_dev_release(struct device *dev)
4358 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4359 bool need_put = !!rbd_dev->opts;
4362 destroy_workqueue(rbd_dev->task_wq);
4363 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4366 rbd_dev_free(rbd_dev);
4369 * This is racy, but way better than putting the module_put() outside of
4370 * the release callback. The race window is pretty small, so
4371 * doing something similar to dm (dm-builtin.c) is overkill.
4374 module_put(THIS_MODULE);
4377 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4378 struct rbd_spec *spec)
4380 struct rbd_device *rbd_dev;
4382 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4386 spin_lock_init(&rbd_dev->lock);
4387 INIT_LIST_HEAD(&rbd_dev->node);
4388 init_rwsem(&rbd_dev->header_rwsem);
4390 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
4391 ceph_oid_init(&rbd_dev->header_oid);
4392 rbd_dev->header_oloc.pool = spec->pool_id;
4393 if (spec->pool_ns) {
4394 WARN_ON(!*spec->pool_ns);
4395 rbd_dev->header_oloc.pool_ns =
4396 ceph_find_or_create_string(spec->pool_ns,
4397 strlen(spec->pool_ns));
4400 mutex_init(&rbd_dev->watch_mutex);
4401 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4402 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4404 init_rwsem(&rbd_dev->lock_rwsem);
4405 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4406 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4407 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4408 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4409 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4410 init_waitqueue_head(&rbd_dev->lock_waitq);
4412 rbd_dev->dev.bus = &rbd_bus_type;
4413 rbd_dev->dev.type = &rbd_device_type;
4414 rbd_dev->dev.parent = &rbd_root_dev;
4415 device_initialize(&rbd_dev->dev);
4417 rbd_dev->rbd_client = rbdc;
4418 rbd_dev->spec = spec;
4424 * Create a mapping rbd_dev.
4426 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4427 struct rbd_spec *spec,
4428 struct rbd_options *opts)
4430 struct rbd_device *rbd_dev;
4432 rbd_dev = __rbd_dev_create(rbdc, spec);
4436 rbd_dev->opts = opts;
4438 /* get an id and fill in device name */
4439 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4440 minor_to_rbd_dev_id(1 << MINORBITS),
4442 if (rbd_dev->dev_id < 0)
4445 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4446 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4448 if (!rbd_dev->task_wq)
4451 /* we have a ref from do_rbd_add() */
4452 __module_get(THIS_MODULE);
4454 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4458 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4460 rbd_dev_free(rbd_dev);
4464 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4467 put_device(&rbd_dev->dev);
4471 * Get the size and object order for an image snapshot, or if
4472 * snap_id is CEPH_NOSNAP, get this information for the base image.
4475 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4476 u8 *order, u64 *snap_size)
4478 __le64 snapid = cpu_to_le64(snap_id);
4483 } __attribute__ ((packed)) size_buf = { 0 };
4485 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4486 &rbd_dev->header_oloc, "get_size",
4487 &snapid, sizeof(snapid),
4488 &size_buf, sizeof(size_buf));
4489 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4492 if (ret < sizeof (size_buf))
4496 *order = size_buf.order;
4497 dout(" order %u", (unsigned int)*order);
4499 *snap_size = le64_to_cpu(size_buf.size);
4501 dout(" snap_id 0x%016llx snap_size = %llu\n",
4502 (unsigned long long)snap_id,
4503 (unsigned long long)*snap_size);
4508 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4510 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4511 &rbd_dev->header.obj_order,
4512 &rbd_dev->header.image_size);
4515 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4521 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4525 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4526 &rbd_dev->header_oloc, "get_object_prefix",
4527 NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4528 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4533 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4534 p + ret, NULL, GFP_NOIO);
4537 if (IS_ERR(rbd_dev->header.object_prefix)) {
4538 ret = PTR_ERR(rbd_dev->header.object_prefix);
4539 rbd_dev->header.object_prefix = NULL;
4541 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4549 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4552 __le64 snapid = cpu_to_le64(snap_id);
4556 } __attribute__ ((packed)) features_buf = { 0 };
4560 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4561 &rbd_dev->header_oloc, "get_features",
4562 &snapid, sizeof(snapid),
4563 &features_buf, sizeof(features_buf));
4564 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4567 if (ret < sizeof (features_buf))
4570 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4572 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4577 *snap_features = le64_to_cpu(features_buf.features);
4579 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4580 (unsigned long long)snap_id,
4581 (unsigned long long)*snap_features,
4582 (unsigned long long)le64_to_cpu(features_buf.incompat));
4587 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4589 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4590 &rbd_dev->header.features);
4593 struct parent_image_info {
4595 const char *pool_ns;
4596 const char *image_id;
4604 * The caller is responsible for @pii.
4606 static int decode_parent_image_spec(void **p, void *end,
4607 struct parent_image_info *pii)
4613 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
4614 &struct_v, &struct_len);
4618 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
4619 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4620 if (IS_ERR(pii->pool_ns)) {
4621 ret = PTR_ERR(pii->pool_ns);
4622 pii->pool_ns = NULL;
4625 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4626 if (IS_ERR(pii->image_id)) {
4627 ret = PTR_ERR(pii->image_id);
4628 pii->image_id = NULL;
4631 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
4638 static int __get_parent_info(struct rbd_device *rbd_dev,
4639 struct page *req_page,
4640 struct page *reply_page,
4641 struct parent_image_info *pii)
4643 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4644 size_t reply_len = PAGE_SIZE;
4648 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4649 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
4650 req_page, sizeof(u64), reply_page, &reply_len);
4652 return ret == -EOPNOTSUPP ? 1 : ret;
4654 p = page_address(reply_page);
4655 end = p + reply_len;
4656 ret = decode_parent_image_spec(&p, end, pii);
4660 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4661 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
4662 req_page, sizeof(u64), reply_page, &reply_len);
4666 p = page_address(reply_page);
4667 end = p + reply_len;
4668 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
4669 if (pii->has_overlap)
4670 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
4679 * The caller is responsible for @pii.
4681 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
4682 struct page *req_page,
4683 struct page *reply_page,
4684 struct parent_image_info *pii)
4686 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4687 size_t reply_len = PAGE_SIZE;
4691 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4692 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
4693 req_page, sizeof(u64), reply_page, &reply_len);
4697 p = page_address(reply_page);
4698 end = p + reply_len;
4699 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
4700 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4701 if (IS_ERR(pii->image_id)) {
4702 ret = PTR_ERR(pii->image_id);
4703 pii->image_id = NULL;
4706 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
4707 pii->has_overlap = true;
4708 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
4716 static int get_parent_info(struct rbd_device *rbd_dev,
4717 struct parent_image_info *pii)
4719 struct page *req_page, *reply_page;
4723 req_page = alloc_page(GFP_KERNEL);
4727 reply_page = alloc_page(GFP_KERNEL);
4729 __free_page(req_page);
4733 p = page_address(req_page);
4734 ceph_encode_64(&p, rbd_dev->spec->snap_id);
4735 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
4737 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
4740 __free_page(req_page);
4741 __free_page(reply_page);
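/*
 * Protocol note, summarizing the helpers above: "parent_get" and
 * "parent_overlap_get" are tried first; on an OSD too old to know
 * them, ceph_osdc_call() fails with -EOPNOTSUPP, __get_parent_info()
 * maps that to 1, and get_parent_info() falls back to the legacy
 * "get_parent" method, which always implies has_overlap.
 */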
4745 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4747 struct rbd_spec *parent_spec;
4748 struct parent_image_info pii = { 0 };
4751 parent_spec = rbd_spec_alloc();
4755 ret = get_parent_info(rbd_dev, &pii);
4759 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
4760 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
4761 pii.has_overlap, pii.overlap);
4763 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
4765 * Either the parent never existed, or we have
4766 * record of it but the image got flattened so it no
4767 * longer has a parent. When the parent of a
4768 * layered image disappears we immediately set the
4769 * overlap to 0. The effect of this is that all new
4770 * requests will be treated as if the image had no parent.
4773 * If !pii.has_overlap, the parent image spec is not
4774 * applicable. It's there to avoid duplication in each snapshot record.
4777 if (rbd_dev->parent_overlap) {
4778 rbd_dev->parent_overlap = 0;
4779 rbd_dev_parent_put(rbd_dev);
4780 pr_info("%s: clone image has been flattened\n",
4781 rbd_dev->disk->disk_name);
4784 goto out; /* No parent? No problem. */
4787 /* The ceph file layout needs to fit pool id in 32 bits */
4790 if (pii.pool_id > (u64)U32_MAX) {
4791 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4792 (unsigned long long)pii.pool_id, U32_MAX);
4797 * The parent won't change (except when the clone is
4798 * flattened, which is handled above). So we only need to
4799 * record the parent spec if we have not already done so.
4801 if (!rbd_dev->parent_spec) {
4802 parent_spec->pool_id = pii.pool_id;
4803 if (pii.pool_ns && *pii.pool_ns) {
4804 parent_spec->pool_ns = pii.pool_ns;
4807 parent_spec->image_id = pii.image_id;
4808 pii.image_id = NULL;
4809 parent_spec->snap_id = pii.snap_id;
4811 rbd_dev->parent_spec = parent_spec;
4812 parent_spec = NULL; /* rbd_dev now owns this */
4816 * We always update the parent overlap. If it's zero we issue
4817 * a warning, as we will proceed as if there were no parent.
4821 /* refresh, careful to warn just once */
4822 if (rbd_dev->parent_overlap)
4824 "clone now standalone (overlap became 0)");
4827 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4830 rbd_dev->parent_overlap = pii.overlap;
4836 kfree(pii.image_id);
4837 rbd_spec_put(parent_spec);
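/*
 * "get_stripe_unit_count" replies with two __le64 values: the stripe
 * unit followed by the stripe count, decoded below in that order.
 */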
4841 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4845 __le64 stripe_count;
4846 } __attribute__ ((packed)) striping_info_buf = { 0 };
4847 size_t size = sizeof (striping_info_buf);
4851 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4852 &rbd_dev->header_oloc, "get_stripe_unit_count",
4853 NULL, 0, &striping_info_buf, size);
4854 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4860 p = &striping_info_buf;
4861 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
4862 rbd_dev->header.stripe_count = ceph_decode_64(&p);
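/*
 * Look up the id of the separate data pool; only used when the image
 * has the RBD_FEATURE_DATA_POOL feature bit set (see
 * rbd_dev_v2_header_onetime() below).
 */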
4866 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
4868 __le64 data_pool_id;
4871 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4872 &rbd_dev->header_oloc, "get_data_pool",
4873 NULL, 0, &data_pool_id, sizeof(data_pool_id));
4876 if (ret < sizeof(data_pool_id))
4879 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
4880 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
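/*
 * Look up the image name for rbd_dev's image id in the pool's
 * RBD_DIRECTORY object using the "dir_get_name" class method.
 * Returns a dynamically-allocated name, or NULL on failure; callers
 * tolerate a missing name (see rbd_spec_fill_names()).
 */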
4884 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4886 CEPH_DEFINE_OID_ONSTACK(oid);
4887 size_t image_id_size;
4892 void *reply_buf = NULL;
4894 char *image_name = NULL;
4897 rbd_assert(!rbd_dev->spec->image_name);
4899 len = strlen(rbd_dev->spec->image_id);
4900 image_id_size = sizeof (__le32) + len;
4901 image_id = kmalloc(image_id_size, GFP_KERNEL);
4906 end = image_id + image_id_size;
4907 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4909 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4910 reply_buf = kmalloc(size, GFP_KERNEL);
4914 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
4915 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
4916 "dir_get_name", image_id, image_id_size,
4921 end = reply_buf + ret;
4923 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4924 if (IS_ERR(image_name))
4927 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
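/*
 * Map a snapshot name to its id for a format 1 image by walking the
 * NUL-separated header.snap_names array in step with snapc->snaps.
 * Returns CEPH_NOSNAP if no snapshot has the given name.
 */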
4935 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4937 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4938 const char *snap_name;
4941 /* Skip over names until we find the one we are looking for */
4943 snap_name = rbd_dev->header.snap_names;
4944 while (which < snapc->num_snaps) {
4945 if (!strcmp(name, snap_name))
4946 return snapc->snaps[which];
4947 snap_name += strlen(snap_name) + 1;
4953 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4955 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4960 for (which = 0; !found && which < snapc->num_snaps; which++) {
4961 const char *snap_name;
4963 snap_id = snapc->snaps[which];
4964 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4965 if (IS_ERR(snap_name)) {
4966 /* ignore no-longer existing snapshots */
4967 if (PTR_ERR(snap_name) == -ENOENT)
4972 found = !strcmp(name, snap_name);
4975 return found ? snap_id : CEPH_NOSNAP;
4979 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4980 * no snapshot by that name is found, or if an error occurs.
4982 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4984 if (rbd_dev->image_format == 1)
4985 return rbd_v1_snap_id_by_name(rbd_dev, name);
4987 return rbd_v2_snap_id_by_name(rbd_dev, name);
4991 * An image being mapped will have everything but the snap id.
4993 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4995 struct rbd_spec *spec = rbd_dev->spec;
4997 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4998 rbd_assert(spec->image_id && spec->image_name);
4999 rbd_assert(spec->snap_name);
5001 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5004 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5005 if (snap_id == CEPH_NOSNAP)
5008 spec->snap_id = snap_id;
5010 spec->snap_id = CEPH_NOSNAP;
5017 * A parent image will have all ids but none of the names.
5019 * All names in an rbd spec are dynamically allocated. It's OK if we
5020 * can't figure out the name for an image id.
5022 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
5024 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5025 struct rbd_spec *spec = rbd_dev->spec;
5026 const char *pool_name;
5027 const char *image_name;
5028 const char *snap_name;
5031 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5032 rbd_assert(spec->image_id);
5033 rbd_assert(spec->snap_id != CEPH_NOSNAP);
5035 /* Get the pool name; we have to make our own copy of this */
5037 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
5039 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
5042 pool_name = kstrdup(pool_name, GFP_KERNEL);
5046 /* Fetch the image name; tolerate failure here */
5048 image_name = rbd_dev_image_name(rbd_dev);
5050 rbd_warn(rbd_dev, "unable to get image name");
5052 /* Fetch the snapshot name */
5054 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
5055 if (IS_ERR(snap_name)) {
5056 ret = PTR_ERR(snap_name);
5060 spec->pool_name = pool_name;
5061 spec->image_name = image_name;
5062 spec->snap_name = snap_name;
5072 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
5081 struct ceph_snap_context *snapc;
5085 * We'll need room for the seq value (maximum snapshot id),
5086 * snapshot count, and array of that many snapshot ids.
5087 * For now we have a fixed upper limit on the number we're
5088 * prepared to receive.
5090 size = sizeof (__le64) + sizeof (__le32) +
5091 RBD_MAX_SNAP_COUNT * sizeof (__le64);
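/* with RBD_MAX_SNAP_COUNT 510 this is 8 + 4 + 510 * 8 = 4092 bytes, under 4 KiB */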
5092 reply_buf = kzalloc(size, GFP_KERNEL);
5096 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5097 &rbd_dev->header_oloc, "get_snapcontext",
5098 NULL, 0, reply_buf, size);
5099 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5104 end = reply_buf + ret;
5106 ceph_decode_64_safe(&p, end, seq, out);
5107 ceph_decode_32_safe(&p, end, snap_count, out);
5110 * Make sure the reported number of snapshot ids wouldn't go
5111 * beyond the end of our buffer. But before checking that,
5112 * make sure the computed size of the snapshot context we
5113 * allocate is representable in a size_t.
5115 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
5120 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
5124 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
5130 for (i = 0; i < snap_count; i++)
5131 snapc->snaps[i] = ceph_decode_64(&p);
5133 ceph_put_snap_context(rbd_dev->header.snapc);
5134 rbd_dev->header.snapc = snapc;
5136 dout(" snap context seq = %llu, snap_count = %u\n",
5137 (unsigned long long)seq, (unsigned int)snap_count);
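/*
 * Fetch the name of the given snap id with the "get_snapshot_name"
 * class method. Returns a dynamically-allocated string, or an
 * ERR_PTR() on failure (-ENOENT if the snapshot no longer exists).
 */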
5144 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
5155 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
5156 reply_buf = kmalloc(size, GFP_KERNEL);
5158 return ERR_PTR(-ENOMEM);
5160 snapid = cpu_to_le64(snap_id);
5161 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5162 &rbd_dev->header_oloc, "get_snapshot_name",
5163 &snapid, sizeof(snapid), reply_buf, size);
5164 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5166 snap_name = ERR_PTR(ret);
5171 end = reply_buf + ret;
5172 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5173 if (IS_ERR(snap_name))
5176 dout(" snap_id 0x%016llx snap_name = %s\n",
5177 (unsigned long long)snap_id, snap_name);
5184 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
5186 bool first_time = rbd_dev->header.object_prefix == NULL;
5189 ret = rbd_dev_v2_image_size(rbd_dev);
5194 ret = rbd_dev_v2_header_onetime(rbd_dev);
5199 ret = rbd_dev_v2_snap_context(rbd_dev);
5200 if (ret && first_time) {
5201 kfree(rbd_dev->header.object_prefix);
5202 rbd_dev->header.object_prefix = NULL;
5208 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5210 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5212 if (rbd_dev->image_format == 1)
5213 return rbd_dev_v1_header_info(rbd_dev);
5215 return rbd_dev_v2_header_info(rbd_dev);
5219 * Skips over white space at *buf, and updates *buf to point to the
5220 * first found non-space character (if any). Returns the length of
5221 * the token (string of non-white space characters) found. Note
5222 * that *buf must be terminated with '\0'.
5224 static inline size_t next_token(const char **buf)
5227 * These are the characters that produce nonzero for
5228 * isspace() in the "C" and "POSIX" locales.
5230 const char *spaces = " \f\n\r\t\v";
5232 *buf += strspn(*buf, spaces); /* Find start of token */
5234 return strcspn(*buf, spaces); /* Return token length */
5238 * Finds the next token in *buf, dynamically allocates a buffer big
5239 * enough to hold a copy of it, and copies the token into the new
5240 * buffer. The copy is guaranteed to be terminated with '\0'. Note
5241 * that a duplicate buffer is created even for a zero-length token.
5243 * Returns a pointer to the newly-allocated duplicate, or a null
5244 * pointer if memory for the duplicate was not available. If
5245 * the lenp argument is a non-null pointer, the length of the token
5246 * (not including the '\0') is returned in *lenp.
5248 * If successful, the *buf pointer will be updated to point beyond
5249 * the end of the found token.
5251 * Note: uses GFP_KERNEL for allocation.
5253 static inline char *dup_token(const char **buf, size_t *lenp)
5258 len = next_token(buf);
5259 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
5262 *(dup + len) = '\0';
5272 * Parse the options provided for an "rbd add" (i.e., rbd image
5273 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
5274 * and the data written is passed here via a NUL-terminated buffer.
5275 * Returns 0 if successful or an error code otherwise.
5277 * The information extracted from these options is recorded in
5278 * the other parameters, which return dynamically-allocated
5279 * structures:
5280 * ceph_opts:
5281 * The address of a pointer that will refer to a ceph options
5282 * structure. Caller must release the returned pointer using
5283 * ceph_destroy_options() when it is no longer needed.
5284 * opts:
5285 * Address of an rbd options pointer. Fully initialized by
5286 * this function; caller must release with kfree().
5287 * rbd_spec:
5288 * Address of an rbd image specification pointer. Fully
5289 * initialized by this function based on parsed options.
5290 * Caller must release with rbd_spec_put().
5292 * The options passed take this form:
5293 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
5294 * where:
5295 * <mon_addrs>:
5296 * A comma-separated list of one or more monitor addresses.
5297 * A monitor address is an ip address, optionally followed
5298 * by a port number (separated by a colon).
5299 * I.e.: ip1[:port1][,ip2[:port2]...]
5300 * <options>:
5301 * A comma-separated list of ceph and/or rbd options.
5302 * <pool_name>:
5303 * The name of the rados pool containing the rbd image.
5304 * <image_name>:
5305 * The name of the image in that pool to map.
5306 * <snap_id>:
5307 * An optional snapshot id. If provided, the mapping will
5308 * present data from the image at the time that snapshot was
5309 * created. The image head is used if no snapshot id is
5310 * provided. Snapshot mappings are always read-only.
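 * For example, a write like this (illustrative monitor address, user
 * and secret) maps the head of image "myimage" from pool "rbd":
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage" \
 *         > /sys/bus/rbd/add
 * See Documentation/ABI/testing/sysfs-bus-rbd for the full interface.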
5312 static int rbd_add_parse_args(const char *buf,
5313 struct ceph_options **ceph_opts,
5314 struct rbd_options **opts,
5315 struct rbd_spec **rbd_spec)
5319 const char *mon_addrs;
5321 size_t mon_addrs_size;
5322 struct parse_rbd_opts_ctx pctx = { 0 };
5323 struct ceph_options *copts;
5326 /* The first four tokens are required */
5328 len = next_token(&buf);
5330 rbd_warn(NULL, "no monitor address(es) provided");
5334 mon_addrs_size = len + 1;
5338 options = dup_token(&buf, NULL);
5342 rbd_warn(NULL, "no options provided");
5346 pctx.spec = rbd_spec_alloc();
5350 pctx.spec->pool_name = dup_token(&buf, NULL);
5351 if (!pctx.spec->pool_name)
5353 if (!*pctx.spec->pool_name) {
5354 rbd_warn(NULL, "no pool name provided");
5358 pctx.spec->image_name = dup_token(&buf, NULL);
5359 if (!pctx.spec->image_name)
5361 if (!*pctx.spec->image_name) {
5362 rbd_warn(NULL, "no image name provided");
5367 * Snapshot name is optional; default is to use "-"
5368 * (indicating the head/no snapshot).
5370 len = next_token(&buf);
5372 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
5373 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
5374 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
5375 ret = -ENAMETOOLONG;
5378 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
5381 *(snap_name + len) = '\0';
5382 pctx.spec->snap_name = snap_name;
5384 /* Initialize all rbd options to the defaults */
5386 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
5390 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
5391 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5392 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
5393 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5394 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
5395 pctx.opts->trim = RBD_TRIM_DEFAULT;
5397 copts = ceph_parse_options(options, mon_addrs,
5398 mon_addrs + mon_addrs_size - 1,
5399 parse_rbd_opts_token, &pctx);
5400 if (IS_ERR(copts)) {
5401 ret = PTR_ERR(copts);
5408 *rbd_spec = pctx.spec;
5415 rbd_spec_put(pctx.spec);
5421 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
5423 down_write(&rbd_dev->lock_rwsem);
5424 if (__rbd_is_lock_owner(rbd_dev))
5425 rbd_unlock(rbd_dev);
5426 up_write(&rbd_dev->lock_rwsem);
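/*
 * Acquire the exclusive lock at map time for "exclusive" mappings;
 * the image must have the exclusive-lock feature enabled for this.
 */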
5429 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5433 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
5434 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
5438 /* FIXME: "rbd map --exclusive" should be interruptible */
5439 down_read(&rbd_dev->lock_rwsem);
5440 ret = rbd_wait_state_locked(rbd_dev, true);
5441 up_read(&rbd_dev->lock_rwsem);
5443 rbd_warn(rbd_dev, "failed to acquire exclusive lock");
5451 * An rbd format 2 image has a unique identifier, distinct from the
5452 * name given to it by the user. Internally, that identifier is
5453 * what's used to specify the names of objects related to the image.
5455 * A special "rbd id" object is used to map an rbd image name to its
5456 * id. If that object doesn't exist, then there is no v2 rbd image
5457 * with the supplied name.
5459 * This function will record the given rbd_dev's image_id field if
5460 * it can be determined, and in that case will return 0. If any
5461 * errors occur a negative errno will be returned and the rbd_dev's
5462 * image_id field will be unchanged (and should be NULL).
5464 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5468 CEPH_DEFINE_OID_ONSTACK(oid);
5473 * When probing a parent image, the image id is already
5474 * known (and the image name likely is not). There's no
5475 * need to fetch the image id again in this case. We
5476 * do still need to set the image format though.
5478 if (rbd_dev->spec->image_id) {
5479 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5485 * First, see if the format 2 image id file exists, and if
5486 * so, get the image's persistent id from it.
5488 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
5489 rbd_dev->spec->image_name);
5493 dout("rbd id object name is %s\n", oid.name);
5495 /* Response will be an encoded string, which includes a length */
5497 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5498 response = kzalloc(size, GFP_NOIO);
5504 /* If it doesn't exist we'll assume it's a format 1 image */
5506 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5508 response, RBD_IMAGE_ID_LEN_MAX);
5509 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5510 if (ret == -ENOENT) {
5511 image_id = kstrdup("", GFP_KERNEL);
5512 ret = image_id ? 0 : -ENOMEM;
5514 rbd_dev->image_format = 1;
5515 } else if (ret >= 0) {
5518 image_id = ceph_extract_encoded_string(&p, p + ret,
5520 ret = PTR_ERR_OR_ZERO(image_id);
5522 rbd_dev->image_format = 2;
5526 rbd_dev->spec->image_id = image_id;
5527 dout("image_id is %s\n", image_id);
5531 ceph_oid_destroy(&oid);
5536 * Undo whatever state changes are made by v1 or v2 header info routines.
5539 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5541 struct rbd_image_header *header;
5543 rbd_dev_parent_put(rbd_dev);
5545 /* Free dynamic fields from the header, then zero it out */
5547 header = &rbd_dev->header;
5548 ceph_put_snap_context(header->snapc);
5549 kfree(header->snap_sizes);
5550 kfree(header->snap_names);
5551 kfree(header->object_prefix);
5552 memset(header, 0, sizeof (*header));
5555 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5559 ret = rbd_dev_v2_object_prefix(rbd_dev);
5564 * Get and check the features for the image. Currently the
5565 * features are assumed to never change.
5567 ret = rbd_dev_v2_features(rbd_dev);
5571 /* If the image supports fancy striping, get its parameters */
5573 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5574 ret = rbd_dev_v2_striping_info(rbd_dev);
5579 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
5580 ret = rbd_dev_v2_data_pool(rbd_dev);
5585 rbd_init_layout(rbd_dev);
5589 rbd_dev->header.features = 0;
5590 kfree(rbd_dev->header.object_prefix);
5591 rbd_dev->header.object_prefix = NULL;
5596 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5597 * rbd_dev_image_probe() recursion depth, which means it's also the
5598 * length of the already discovered part of the parent chain.
5600 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5602 struct rbd_device *parent = NULL;
5605 if (!rbd_dev->parent_spec)
5608 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5609 pr_info("parent chain is too long (%d)\n", depth);
5614 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
5621 * Images related by parent/child relationships always share
5622 * rbd_client and spec/parent_spec, so bump their refcounts.
5624 __rbd_get_client(rbd_dev->rbd_client);
5625 rbd_spec_get(rbd_dev->parent_spec);
5627 ret = rbd_dev_image_probe(parent, depth);
5631 rbd_dev->parent = parent;
5632 atomic_set(&rbd_dev->parent_ref, 1);
5636 rbd_dev_unparent(rbd_dev);
5637 rbd_dev_destroy(parent);
5641 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
5643 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5644 rbd_dev_mapping_clear(rbd_dev);
5645 rbd_free_disk(rbd_dev);
5647 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5651 * rbd_dev->header_rwsem must be locked for write and will be unlocked upon return.
5654 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5658 /* Record our major and minor device numbers. */
5660 if (!single_major) {
5661 ret = register_blkdev(0, rbd_dev->name);
5663 goto err_out_unlock;
5665 rbd_dev->major = ret;
5668 rbd_dev->major = rbd_major;
5669 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5672 /* Set up the blkdev mapping. */
5674 ret = rbd_init_disk(rbd_dev);
5676 goto err_out_blkdev;
5678 ret = rbd_dev_mapping_set(rbd_dev);
5682 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5683 set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
5685 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5687 goto err_out_mapping;
5689 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5690 up_write(&rbd_dev->header_rwsem);
5694 rbd_dev_mapping_clear(rbd_dev);
5696 rbd_free_disk(rbd_dev);
5699 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5701 up_write(&rbd_dev->header_rwsem);
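/*
 * For example (illustrative names), with RBD_SUFFIX ".rbd" and
 * RBD_HEADER_PREFIX "rbd_header." from rbd_types.h, image "foo" gets
 * header object "foo.rbd" under format 1, and an image with id
 * "101a6b8b4567" gets "rbd_header.101a6b8b4567" under format 2.
 */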
5705 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5707 struct rbd_spec *spec = rbd_dev->spec;
5710 /* Record the header object name for this rbd image. */
5712 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5713 if (rbd_dev->image_format == 1)
5714 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5715 spec->image_name, RBD_SUFFIX);
5717 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5718 RBD_HEADER_PREFIX, spec->image_id);
5723 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5725 rbd_dev_unprobe(rbd_dev);
5727 rbd_unregister_watch(rbd_dev);
5728 rbd_dev->image_format = 0;
5729 kfree(rbd_dev->spec->image_id);
5730 rbd_dev->spec->image_id = NULL;
5734 * Probe for the existence of the header object for the given rbd
5735 * device. If this image is the one being mapped (i.e., not a
5736 * parent), initiate a watch on its header object before using that
5737 * object to get detailed information about the rbd image.
5739 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
5744 * Get the id from the image id object. Unless there's an
5745 * error, rbd_dev->spec->image_id will be filled in with
5746 * a dynamically-allocated string, and rbd_dev->image_format
5747 * will be set to either 1 or 2.
5749 ret = rbd_dev_image_id(rbd_dev);
5753 ret = rbd_dev_header_name(rbd_dev);
5755 goto err_out_format;
5758 ret = rbd_register_watch(rbd_dev);
5761 pr_info("image %s/%s%s%s does not exist\n",
5762 rbd_dev->spec->pool_name,
5763 rbd_dev->spec->pool_ns ?: "",
5764 rbd_dev->spec->pool_ns ? "/" : "",
5765 rbd_dev->spec->image_name);
5766 goto err_out_format;
5770 ret = rbd_dev_header_info(rbd_dev);
5775 * If this image is the one being mapped, we have pool name and
5776 * id, image name and id, and snap name - need to fill snap id.
5777 * Otherwise this is a parent image, identified by pool, image
5778 * and snap ids - need to fill in names for those ids.
5781 ret = rbd_spec_fill_snap_id(rbd_dev);
5783 ret = rbd_spec_fill_names(rbd_dev);
5786 pr_info("snap %s/%s%s%s@%s does not exist\n",
5787 rbd_dev->spec->pool_name,
5788 rbd_dev->spec->pool_ns ?: "",
5789 rbd_dev->spec->pool_ns ? "/" : "",
5790 rbd_dev->spec->image_name,
5791 rbd_dev->spec->snap_name);
5795 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5796 ret = rbd_dev_v2_parent_info(rbd_dev);
5801 * Need to warn users if this image is the one being
5802 * mapped and has a parent.
5804 if (!depth && rbd_dev->parent_spec)
5806 "WARNING: kernel layering is EXPERIMENTAL!");
5809 ret = rbd_dev_probe_parent(rbd_dev, depth);
5813 dout("discovered format %u image, header name is %s\n",
5814 rbd_dev->image_format, rbd_dev->header_oid.name);
5818 rbd_dev_unprobe(rbd_dev);
5821 rbd_unregister_watch(rbd_dev);
5823 rbd_dev->image_format = 0;
5824 kfree(rbd_dev->spec->image_id);
5825 rbd_dev->spec->image_id = NULL;
5829 static ssize_t do_rbd_add(struct bus_type *bus,
5833 struct rbd_device *rbd_dev = NULL;
5834 struct ceph_options *ceph_opts = NULL;
5835 struct rbd_options *rbd_opts = NULL;
5836 struct rbd_spec *spec = NULL;
5837 struct rbd_client *rbdc;
5840 if (!try_module_get(THIS_MODULE))
5843 /* parse add command */
5844 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5848 rbdc = rbd_get_client(ceph_opts);
5855 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
5858 pr_info("pool %s does not exist\n", spec->pool_name);
5859 goto err_out_client;
5861 spec->pool_id = (u64)rc;
5863 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
5866 goto err_out_client;
5868 rbdc = NULL; /* rbd_dev now owns this */
5869 spec = NULL; /* rbd_dev now owns this */
5870 rbd_opts = NULL; /* rbd_dev now owns this */
5872 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
5873 if (!rbd_dev->config_info) {
5875 goto err_out_rbd_dev;
5878 down_write(&rbd_dev->header_rwsem);
5879 rc = rbd_dev_image_probe(rbd_dev, 0);
5881 up_write(&rbd_dev->header_rwsem);
5882 goto err_out_rbd_dev;
5885 /* If we are mapping a snapshot it must be marked read-only */
5886 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5887 rbd_dev->opts->read_only = true;
5889 rc = rbd_dev_device_setup(rbd_dev);
5891 goto err_out_image_probe;
5893 if (rbd_dev->opts->exclusive) {
5894 rc = rbd_add_acquire_lock(rbd_dev);
5896 goto err_out_device_setup;
5899 /* Everything's ready. Announce the disk to the world. */
5901 rc = device_add(&rbd_dev->dev);
5903 goto err_out_image_lock;
5905 add_disk(rbd_dev->disk);
5906 /* see rbd_init_disk() */
5907 blk_put_queue(rbd_dev->disk->queue);
5909 spin_lock(&rbd_dev_list_lock);
5910 list_add_tail(&rbd_dev->node, &rbd_dev_list);
5911 spin_unlock(&rbd_dev_list_lock);
5913 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
5914 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
5915 rbd_dev->header.features);
5918 module_put(THIS_MODULE);
5922 rbd_dev_image_unlock(rbd_dev);
5923 err_out_device_setup:
5924 rbd_dev_device_release(rbd_dev);
5925 err_out_image_probe:
5926 rbd_dev_image_release(rbd_dev);
5928 rbd_dev_destroy(rbd_dev);
5930 rbd_put_client(rbdc);
5937 static ssize_t rbd_add(struct bus_type *bus,
5944 return do_rbd_add(bus, buf, count);
5947 static ssize_t rbd_add_single_major(struct bus_type *bus,
5951 return do_rbd_add(bus, buf, count);
5954 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5956 while (rbd_dev->parent) {
5957 struct rbd_device *first = rbd_dev;
5958 struct rbd_device *second = first->parent;
5959 struct rbd_device *third;
5962 * Follow to the parent with no grandparent and remove it.
5965 while (second && (third = second->parent)) {
5970 rbd_dev_image_release(second);
5971 rbd_dev_destroy(second);
5972 first->parent = NULL;
5973 first->parent_overlap = 0;
5975 rbd_assert(first->parent_spec);
5976 rbd_spec_put(first->parent_spec);
5977 first->parent_spec = NULL;
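/*
 * Handle a write to /sys/bus/rbd/remove (or remove_single_major).
 * The buffer carries "<dev-id> [force]", e.g. "0 force" (illustrative)
 * to remove rbd0 even while it still has openers.
 */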
5981 static ssize_t do_rbd_remove(struct bus_type *bus,
5985 struct rbd_device *rbd_dev = NULL;
5986 struct list_head *tmp;
5989 bool already = false;
5995 sscanf(buf, "%d %5s", &dev_id, opt_buf);
5997 pr_err("dev_id out of range\n");
6000 if (opt_buf[0] != '\0') {
6001 if (!strcmp(opt_buf, "force")) {
6004 pr_err("bad remove option at '%s'\n", opt_buf);
6010 spin_lock(&rbd_dev_list_lock);
6011 list_for_each(tmp, &rbd_dev_list) {
6012 rbd_dev = list_entry(tmp, struct rbd_device, node);
6013 if (rbd_dev->dev_id == dev_id) {
6019 spin_lock_irq(&rbd_dev->lock);
6020 if (rbd_dev->open_count && !force)
6023 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6025 spin_unlock_irq(&rbd_dev->lock);
6027 spin_unlock(&rbd_dev_list_lock);
6028 if (ret < 0 || already)
6033 * Prevent new IO from being queued and wait for existing
6034 * IO to complete/fail.
6036 blk_mq_freeze_queue(rbd_dev->disk->queue);
6037 blk_set_queue_dying(rbd_dev->disk->queue);
6040 del_gendisk(rbd_dev->disk);
6041 spin_lock(&rbd_dev_list_lock);
6042 list_del_init(&rbd_dev->node);
6043 spin_unlock(&rbd_dev_list_lock);
6044 device_del(&rbd_dev->dev);
6046 rbd_dev_image_unlock(rbd_dev);
6047 rbd_dev_device_release(rbd_dev);
6048 rbd_dev_image_release(rbd_dev);
6049 rbd_dev_destroy(rbd_dev);
6053 static ssize_t rbd_remove(struct bus_type *bus,
6060 return do_rbd_remove(bus, buf, count);
6063 static ssize_t rbd_remove_single_major(struct bus_type *bus,
6067 return do_rbd_remove(bus, buf, count);
6071 * create control files in sysfs
6074 static int __init rbd_sysfs_init(void)
6078 ret = device_register(&rbd_root_dev);
6082 ret = bus_register(&rbd_bus_type);
6084 device_unregister(&rbd_root_dev);
6089 static void __exit rbd_sysfs_cleanup(void)
6091 bus_unregister(&rbd_bus_type);
6092 device_unregister(&rbd_root_dev);
6095 static int __init rbd_slab_init(void)
6097 rbd_assert(!rbd_img_request_cache);
6098 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
6099 if (!rbd_img_request_cache)
6102 rbd_assert(!rbd_obj_request_cache);
6103 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
6104 if (!rbd_obj_request_cache)
6110 kmem_cache_destroy(rbd_img_request_cache);
6111 rbd_img_request_cache = NULL;
6115 static void rbd_slab_exit(void)
6117 rbd_assert(rbd_obj_request_cache);
6118 kmem_cache_destroy(rbd_obj_request_cache);
6119 rbd_obj_request_cache = NULL;
6121 rbd_assert(rbd_img_request_cache);
6122 kmem_cache_destroy(rbd_img_request_cache);
6123 rbd_img_request_cache = NULL;
6126 static int __init rbd_init(void)
6130 if (!libceph_compatible(NULL)) {
6131 rbd_warn(NULL, "libceph incompatibility (quitting)");
6135 rc = rbd_slab_init();
6140 * The number of active work items is limited by the number of
6141 * rbd devices * queue depth, so leave @max_active at default.
6143 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
6150 rbd_major = register_blkdev(0, RBD_DRV_NAME);
6151 if (rbd_major < 0) {
6157 rc = rbd_sysfs_init();
6159 goto err_out_blkdev;
6162 pr_info("loaded (major %d)\n", rbd_major);
6164 pr_info("loaded\n");
6170 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6172 destroy_workqueue(rbd_wq);
6178 static void __exit rbd_exit(void)
6180 ida_destroy(&rbd_dev_id_ida);
6181 rbd_sysfs_cleanup();
6183 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6184 destroy_workqueue(rbd_wq);
6188 module_init(rbd_init);
6189 module_exit(rbd_exit);
6191 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6192 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6193 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6194 /* following authorship retained from original osdblk.c */
6195 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6197 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6198 MODULE_LICENSE("GPL");