rbd: fix image request leak on parent read
drivers/block/rbd.c
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256	/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These fields never change for a given rbd image */
	char *object_prefix;
	u64 features;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;
	u64 *snap_sizes;

	u64 stripe_unit;
	u64 stripe_count;
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;	/* posn in image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

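/*
 * Warning wrapper that tags the message with the most specific
 * identity available for the device: the disk name once the gendisk
 * exists, otherwise the image name or image id from its spec, or
 * failing all of those the rbd_dev pointer itself.
 */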
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

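/*
 * An open of a mapped device takes a reference on the underlying
 * struct device.  Write opens of a read-only mapping are refused
 * with -EROFS, and any open fails with -ENOENT once removal of the
 * mapping has begun.
 */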
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};

/*
 * Initialize an rbd client instance.  We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

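/*
 * Parse a single mount option token.  The int and string argument
 * plumbing is in place but currently unused; only the Boolean
 * read_only/read_write options (and their ro/rw spellings) are
 * recognized.
 */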
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}

/*
 * Get a ceph client with specific addr and configuration; if one
 * does not exist, create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * The client is removed from rbd_client_list here, under
 * rbd_client_list_lock, so the caller must not already hold
 * that lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

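/*
 * Sanity-check an on-disk (format 1) image header: it must begin
 * with the rbd magic text, its object order must fall within the
 * supported range, and its snapshot count and name lengths must not
 * overflow the size_t arithmetic used when copying the header.
 */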
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX) {
			/* don't leak object_prefix on this error path */
			kfree(header->object_prefix);
			header->object_prefix = NULL;
			return -EIO;
		}
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);

	header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return NULL;

	return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

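/*
 * Look up the size of the given snapshot (CEPH_NOSNAP means the
 * image head).  For format 1 images the answer comes from the
 * cached header; format 2 requires a method call to the osd.
 */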
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

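/*
 * Record the size and feature mask of the snapshot (or head) named
 * in the device's spec as the device's mapping.  Mapping a snapshot
 * forces the mapping read-only.
 */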
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	const char *snap_name = rbd_dev->spec->snap_name;
	u64 snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
		snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;
	} else {
		snap_id = CEPH_NOSNAP;
	}

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	/* If we are mapping a snapshot it must be marked read-only */

	if (snap_id != CEPH_NOSNAP)
		rbd_dev->mapping.read_only = true;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;
}

static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;
}

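/*
 * Return the name of the object backing the image segment that
 * contains the given byte offset: "<object_prefix>.%012llx", where
 * the hex value is the segment number (offset >> obj_order).  For
 * example, with the default 4 MB objects (obj_order 22) an offset
 * of 0x12345678 falls in segment 0x48, giving a name ending in
 * ".000000000048".  The name is allocated from
 * rbd_segment_name_cache and must be released with
 * rbd_segment_name_free().
 */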
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		/* name came from the slab cache, not kmalloc() */
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

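/*
 * Add an object request to an image request.  The image request
 * takes over the object request's initial reference, records the
 * request's position ("which") in its list, and marks the object
 * request as image data.
 */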
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better offhand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;
	}
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

AE
1591static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1592 struct ceph_msg *msg)
1593{
1594 struct rbd_obj_request *obj_request = osd_req->r_priv;
bf0d5f50
AE
1595 u16 opcode;
1596
37206ee5 1597 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
bf0d5f50 1598 rbd_assert(osd_req == obj_request->osd_req);
57acbaa7
AE
1599 if (obj_request_img_data_test(obj_request)) {
1600 rbd_assert(obj_request->img_request);
1601 rbd_assert(obj_request->which != BAD_WHICH);
1602 } else {
1603 rbd_assert(obj_request->which == BAD_WHICH);
1604 }
bf0d5f50 1605
1b83bef2
SW
1606 if (osd_req->r_result < 0)
1607 obj_request->result = osd_req->r_result;
bf0d5f50 1608
0eefd470 1609 BUG_ON(osd_req->r_num_ops > 2);
bf0d5f50 1610
c47f9371
AE
1611 /*
1612 * We support a 64-bit length, but ultimately it has to be
1613 * passed to blk_end_request(), which takes an unsigned int.
1614 */
1b83bef2 1615 obj_request->xferred = osd_req->r_reply_op_len[0];
8b3e1a56 1616 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
79528734 1617 opcode = osd_req->r_ops[0].op;
bf0d5f50
AE
1618 switch (opcode) {
1619 case CEPH_OSD_OP_READ:
c47f9371 1620 rbd_osd_read_callback(obj_request);
bf0d5f50
AE
1621 break;
1622 case CEPH_OSD_OP_WRITE:
c47f9371 1623 rbd_osd_write_callback(obj_request);
bf0d5f50 1624 break;
fbfab539 1625 case CEPH_OSD_OP_STAT:
c47f9371 1626 rbd_osd_stat_callback(obj_request);
fbfab539 1627 break;
36be9a76 1628 case CEPH_OSD_OP_CALL:
b8d70035 1629 case CEPH_OSD_OP_NOTIFY_ACK:
9969ebc5 1630 case CEPH_OSD_OP_WATCH:
c47f9371 1631 rbd_osd_trivial_callback(obj_request);
9969ebc5 1632 break;
bf0d5f50
AE
1633 default:
1634 rbd_warn(NULL, "%s: unsupported op %hu\n",
1635 obj_request->object_name, (unsigned short) opcode);
1636 break;
1637 }
1638
07741308 1639 if (obj_request_done_test(obj_request))
bf0d5f50
AE
1640 rbd_obj_request_complete(obj_request);
1641}
1642
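/*
 * Finish building an osd request.  Reads are built with a null
 * snapshot context and the mapping's snapshot id; writes carry the
 * image request's snapshot context, CEPH_NOSNAP, and a modification
 * time.
 */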
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}

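/*
 * Create a single-op osd request for an object request.  A write
 * carries the image request's snapshot context and the ONDISK flag;
 * a read carries neither.
 */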
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

0eefd470
AE
1712/*
1713 * Create a copyup osd request based on the information in the
1714 * object request supplied. A copyup request has two osd ops,
1715 * a copyup method call, and a "normal" write request.
1716 */
1717static struct ceph_osd_request *
1718rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1719{
1720 struct rbd_img_request *img_request;
1721 struct ceph_snap_context *snapc;
1722 struct rbd_device *rbd_dev;
1723 struct ceph_osd_client *osdc;
1724 struct ceph_osd_request *osd_req;
1725
1726 rbd_assert(obj_request_img_data_test(obj_request));
1727 img_request = obj_request->img_request;
1728 rbd_assert(img_request);
1729 rbd_assert(img_request_write_test(img_request));
1730
1731 /* Allocate and initialize the request, for the two ops */
1732
1733 snapc = img_request->snapc;
1734 rbd_dev = img_request->rbd_dev;
1735 osdc = &rbd_dev->rbd_client->client->osdc;
1736 osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
1737 if (!osd_req)
1738 return NULL; /* ENOMEM */
1739
1740 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1741 osd_req->r_callback = rbd_osd_req_callback;
1742 osd_req->r_priv = obj_request;
1743
1744 osd_req->r_oid_len = strlen(obj_request->object_name);
1745 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1746 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1747
1748 osd_req->r_file_layout = rbd_dev->layout; /* struct */
1749
1750 return osd_req;
1751}
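/*
 * A sketch of how the two ops end up laid out once
 * rbd_img_obj_parent_read_full_callback() below fills them in
 * (reconstructed from that code, for illustration):
 *
 *	op 0: CEPH_OSD_OP_CALL "rbd" "copyup"	data: full parent object
 *	op 1: CEPH_OSD_OP_WRITE			data: the original bio
 *
 * Assuming the usual cls_rbd semantics, the copyup method populates
 * the target object only when it has no data yet, so the write in
 * op 1 always lands on a fully copied-up object.
 */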
1752
1753
bf0d5f50
AE
1754static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1755{
1756 ceph_osdc_put_request(osd_req);
1757}
1758
1759/* object_name is assumed to be a non-null pointer and NUL-terminated */
1760
1761static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1762 u64 offset, u64 length,
1763 enum obj_request_type type)
1764{
1765 struct rbd_obj_request *obj_request;
1766 size_t size;
1767 char *name;
1768
1769 rbd_assert(obj_request_type_valid(type));
1770
1771 size = strlen(object_name) + 1;
f907ad55
AE
1772 name = kmalloc(size, GFP_KERNEL);
1773 if (!name)
bf0d5f50
AE
1774 return NULL;
1775
868311b1 1776 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
f907ad55
AE
1777 if (!obj_request) {
1778 kfree(name);
1779 return NULL;
1780 }
1781
bf0d5f50
AE
1782 obj_request->object_name = memcpy(name, object_name, size);
1783 obj_request->offset = offset;
1784 obj_request->length = length;
926f9b3f 1785 obj_request->flags = 0;
bf0d5f50
AE
1786 obj_request->which = BAD_WHICH;
1787 obj_request->type = type;
1788 INIT_LIST_HEAD(&obj_request->links);
788e2df3 1789 init_completion(&obj_request->completion);
bf0d5f50
AE
1790 kref_init(&obj_request->kref);
1791
37206ee5
AE
1792 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1793 offset, length, (int)type, obj_request);
1794
bf0d5f50
AE
1795 return obj_request;
1796}
1797
1798static void rbd_obj_request_destroy(struct kref *kref)
1799{
1800 struct rbd_obj_request *obj_request;
1801
1802 obj_request = container_of(kref, struct rbd_obj_request, kref);
1803
37206ee5
AE
1804 dout("%s: obj %p\n", __func__, obj_request);
1805
bf0d5f50
AE
1806 rbd_assert(obj_request->img_request == NULL);
1807 rbd_assert(obj_request->which == BAD_WHICH);
1808
1809 if (obj_request->osd_req)
1810 rbd_osd_req_destroy(obj_request->osd_req);
1811
1812 rbd_assert(obj_request_type_valid(obj_request->type));
1813 switch (obj_request->type) {
9969ebc5
AE
1814 case OBJ_REQUEST_NODATA:
1815 break; /* Nothing to do */
bf0d5f50
AE
1816 case OBJ_REQUEST_BIO:
1817 if (obj_request->bio_list)
1818 bio_chain_put(obj_request->bio_list);
1819 break;
788e2df3
AE
1820 case OBJ_REQUEST_PAGES:
1821 if (obj_request->pages)
1822 ceph_release_page_vector(obj_request->pages,
1823 obj_request->page_count);
1824 break;
bf0d5f50
AE
1825 }
1826
f907ad55 1827 kfree(obj_request->object_name);
868311b1
AE
1828 obj_request->object_name = NULL;
1829 kmem_cache_free(rbd_obj_request_cache, obj_request);
bf0d5f50
AE
1830}
1831
1832/*
1833 * Caller is responsible for filling in the list of object requests
1834 * that comprises the image request, and the Linux request pointer
1835 * (if there is one).
1836 */
cc344fa1
AE
1837static struct rbd_img_request *rbd_img_request_create(
1838 struct rbd_device *rbd_dev,
bf0d5f50 1839 u64 offset, u64 length,
9849e986
AE
1840 bool write_request,
1841 bool child_request)
bf0d5f50
AE
1842{
1843 struct rbd_img_request *img_request;
bf0d5f50 1844
1c2a9dfe 1845 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
bf0d5f50
AE
1846 if (!img_request)
1847 return NULL;
1848
1849 if (write_request) {
1850 down_read(&rbd_dev->header_rwsem);
812164f8 1851 ceph_get_snap_context(rbd_dev->header.snapc);
bf0d5f50 1852 up_read(&rbd_dev->header_rwsem);
bf0d5f50
AE
1853 }
1854
1855 img_request->rq = NULL;
1856 img_request->rbd_dev = rbd_dev;
1857 img_request->offset = offset;
1858 img_request->length = length;
0c425248
AE
1859 img_request->flags = 0;
1860 if (write_request) {
1861 img_request_write_set(img_request);
468521c1 1862 img_request->snapc = rbd_dev->header.snapc;
0c425248 1863 } else {
bf0d5f50 1864 img_request->snap_id = rbd_dev->spec->snap_id;
0c425248 1865 }
9849e986
AE
1866 if (child_request)
1867 img_request_child_set(img_request);
d0b2e944
AE
1868 if (rbd_dev->parent_spec)
1869 img_request_layered_set(img_request);
bf0d5f50
AE
1870 spin_lock_init(&img_request->completion_lock);
1871 img_request->next_completion = 0;
1872 img_request->callback = NULL;
a5a337d4 1873 img_request->result = 0;
bf0d5f50
AE
1874 img_request->obj_request_count = 0;
1875 INIT_LIST_HEAD(&img_request->obj_requests);
1876 kref_init(&img_request->kref);
1877
1878 rbd_img_request_get(img_request); /* Avoid a warning */
1879 rbd_img_request_put(img_request); /* TEMPORARY */
1880
37206ee5
AE
1881 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1882 write_request ? "write" : "read", offset, length,
1883 img_request);
1884
bf0d5f50
AE
1885 return img_request;
1886}
1887
1888static void rbd_img_request_destroy(struct kref *kref)
1889{
1890 struct rbd_img_request *img_request;
1891 struct rbd_obj_request *obj_request;
1892 struct rbd_obj_request *next_obj_request;
1893
1894 img_request = container_of(kref, struct rbd_img_request, kref);
1895
37206ee5
AE
1896 dout("%s: img %p\n", __func__, img_request);
1897
bf0d5f50
AE
1898 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1899 rbd_img_obj_request_del(img_request, obj_request);
25dcf954 1900 rbd_assert(img_request->obj_request_count == 0);
bf0d5f50 1901
0c425248 1902 if (img_request_write_test(img_request))
812164f8 1903 ceph_put_snap_context(img_request->snapc);
bf0d5f50 1904
8b3e1a56
AE
1905 if (img_request_child_test(img_request))
1906 rbd_obj_request_put(img_request->obj_request);
1907
1c2a9dfe 1908 kmem_cache_free(rbd_img_request_cache, img_request);
bf0d5f50
AE
1909}
1910
1217857f
AE
1911static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
1912{
6365d33a 1913 struct rbd_img_request *img_request;
1217857f
AE
1914 unsigned int xferred;
1915 int result;
8b3e1a56 1916 bool more;
1217857f 1917
6365d33a
AE
1918 rbd_assert(obj_request_img_data_test(obj_request));
1919 img_request = obj_request->img_request;
1920
1217857f
AE
1921 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
1922 xferred = (unsigned int)obj_request->xferred;
1923 result = obj_request->result;
1924 if (result) {
1925 struct rbd_device *rbd_dev = img_request->rbd_dev;
1926
1927 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
1928 img_request_write_test(img_request) ? "write" : "read",
1929 obj_request->length, obj_request->img_offset,
1930 obj_request->offset);
1931 rbd_warn(rbd_dev, " result %d xferred %x\n",
1932 result, xferred);
1933 if (!img_request->result)
1934 img_request->result = result;
1935 }
1936
f1a4739f
AE
1937 /* Image object requests don't own their page array */
1938
1939 if (obj_request->type == OBJ_REQUEST_PAGES) {
1940 obj_request->pages = NULL;
1941 obj_request->page_count = 0;
1942 }
1943
8b3e1a56
AE
1944 if (img_request_child_test(img_request)) {
1945 rbd_assert(img_request->obj_request != NULL);
1946 more = obj_request->which < img_request->obj_request_count - 1;
1947 } else {
1948 rbd_assert(img_request->rq != NULL);
1949 more = blk_end_request(img_request->rq, result, xferred);
1950 }
1951
1952 return more;
1217857f
AE
1953}
1954
2169238d
AE
1955static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
1956{
1957 struct rbd_img_request *img_request;
1958 u32 which = obj_request->which;
1959 bool more = true;
1960
6365d33a 1961 rbd_assert(obj_request_img_data_test(obj_request));
2169238d
AE
1962 img_request = obj_request->img_request;
1963
1964 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1965 rbd_assert(img_request != NULL);
2169238d
AE
1966 rbd_assert(img_request->obj_request_count > 0);
1967 rbd_assert(which != BAD_WHICH);
1968 rbd_assert(which < img_request->obj_request_count);
1969 rbd_assert(which >= img_request->next_completion);
1970
1971 spin_lock_irq(&img_request->completion_lock);
1972 if (which != img_request->next_completion)
1973 goto out;
1974
1975 for_each_obj_request_from(img_request, obj_request) {
2169238d
AE
1976 rbd_assert(more);
1977 rbd_assert(which < img_request->obj_request_count);
1978
1979 if (!obj_request_done_test(obj_request))
1980 break;
1217857f 1981 more = rbd_img_obj_end_request(obj_request);
2169238d
AE
1982 which++;
1983 }
1984
1985 rbd_assert(more ^ (which == img_request->obj_request_count));
1986 img_request->next_completion = which;
1987out:
1988 spin_unlock_irq(&img_request->completion_lock);
1989
1990 if (!more)
1991 rbd_img_request_complete(img_request);
1992}
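/*
 * A worked example of the in-order completion above (with a
 * hypothetical completion order): suppose object requests 0..3
 * finish as 2, 0, 3, 1.
 *
 *	callback(2): which=2 != next_completion=0, so just return
 *	             (request 2's done flag was already set)
 *	callback(0): retires 0, stops at 1 (not done); next_completion=1
 *	callback(3): which=3 != next_completion=1, so just return
 *	callback(1): retires 1, 2 and 3 in one sweep; "more" goes false
 *	             on the last one and the image request completes
 */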
1993
f1a4739f
AE
1994/*
1995 * Split up an image request into one or more object requests, each
1996 * to a different object. The "type" parameter indicates whether
1997 * "data_desc" is the pointer to the head of a list of bio
1998 * structures, or the base of a page array. In either case this
1999 * function assumes data_desc describes memory sufficient to hold
2000 * all data described by the image request.
2001 */
2002static int rbd_img_request_fill(struct rbd_img_request *img_request,
2003 enum obj_request_type type,
2004 void *data_desc)
bf0d5f50
AE
2005{
2006 struct rbd_device *rbd_dev = img_request->rbd_dev;
2007 struct rbd_obj_request *obj_request = NULL;
2008 struct rbd_obj_request *next_obj_request;
0c425248 2009 bool write_request = img_request_write_test(img_request);
f1a4739f
AE
2010 struct bio *bio_list;
2011 unsigned int bio_offset = 0;
2012 struct page **pages;
7da22d29 2013 u64 img_offset;
bf0d5f50
AE
2014 u64 resid;
2015 u16 opcode;
2016
f1a4739f
AE
2017 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2018 (int)type, data_desc);
37206ee5 2019
430c28c3 2020 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
7da22d29 2021 img_offset = img_request->offset;
bf0d5f50 2022 resid = img_request->length;
4dda41d3 2023 rbd_assert(resid > 0);
f1a4739f
AE
2024
2025 if (type == OBJ_REQUEST_BIO) {
2026 bio_list = data_desc;
2027 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2028 } else {
2029 rbd_assert(type == OBJ_REQUEST_PAGES);
2030 pages = data_desc;
2031 }
2032
bf0d5f50 2033 while (resid) {
2fa12320 2034 struct ceph_osd_request *osd_req;
bf0d5f50 2035 const char *object_name;
bf0d5f50
AE
2036 u64 offset;
2037 u64 length;
2038
7da22d29 2039 object_name = rbd_segment_name(rbd_dev, img_offset);
bf0d5f50
AE
2040 if (!object_name)
2041 goto out_unwind;
7da22d29
AE
2042 offset = rbd_segment_offset(rbd_dev, img_offset);
2043 length = rbd_segment_length(rbd_dev, img_offset, resid);
bf0d5f50 2044 obj_request = rbd_obj_request_create(object_name,
f1a4739f 2045 offset, length, type);
78c2a44a
AE
2046 /* object request has its own copy of the object name */
2047 rbd_segment_name_free(object_name);
bf0d5f50
AE
2048 if (!obj_request)
2049 goto out_unwind;
2050
f1a4739f
AE
2051 if (type == OBJ_REQUEST_BIO) {
2052 unsigned int clone_size;
2053
2054 rbd_assert(length <= (u64)UINT_MAX);
2055 clone_size = (unsigned int)length;
2056 obj_request->bio_list =
2057 bio_chain_clone_range(&bio_list,
2058 &bio_offset,
2059 clone_size,
2060 GFP_ATOMIC);
2061 if (!obj_request->bio_list)
2062 goto out_partial;
2063 } else {
2064 unsigned int page_count;
2065
2066 obj_request->pages = pages;
2067 page_count = (u32)calc_pages_for(offset, length);
2068 obj_request->page_count = page_count;
2069 if ((offset + length) & ~PAGE_MASK)
2070 page_count--; /* more on last page */
2071 pages += page_count;
2072 }
bf0d5f50 2073
2fa12320
AE
2074 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2075 obj_request);
2076 if (!osd_req)
bf0d5f50 2077 goto out_partial;
2fa12320 2078 obj_request->osd_req = osd_req;
2169238d 2079 obj_request->callback = rbd_img_obj_callback;
430c28c3 2080
2fa12320
AE
2081 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2082 0, 0);
f1a4739f
AE
2083 if (type == OBJ_REQUEST_BIO)
2084 osd_req_op_extent_osd_data_bio(osd_req, 0,
2085 obj_request->bio_list, length);
2086 else
2087 osd_req_op_extent_osd_data_pages(osd_req, 0,
2088 obj_request->pages, length,
2089 offset & ~PAGE_MASK, false, false);
9d4df01f
AE
2090
2091 if (write_request)
2092 rbd_osd_req_format_write(obj_request);
2093 else
2094 rbd_osd_req_format_read(obj_request);
430c28c3 2095
7da22d29 2096 obj_request->img_offset = img_offset;
bf0d5f50
AE
2097 rbd_img_obj_request_add(img_request, obj_request);
2098
7da22d29 2099 img_offset += length;
bf0d5f50
AE
2100 resid -= length;
2101 }
2102
2103 return 0;
2104
2105out_partial:
2106 rbd_obj_request_put(obj_request);
2107out_unwind:
2108 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2109 rbd_obj_request_put(obj_request);
2110
2111 return -ENOMEM;
2112}
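/*
 * A user-space sketch of the segment math driving the loop above.
 * The arithmetic is reconstructed from the rbd_segment_offset()/
 * rbd_segment_length() helpers defined earlier in this file; the
 * function names and numbers here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t segment_offset(uint64_t img_offset, unsigned int obj_order)
{
	return img_offset & (((uint64_t)1 << obj_order) - 1);
}

static uint64_t segment_length(uint64_t img_offset, uint64_t resid,
			       unsigned int obj_order)
{
	uint64_t room = ((uint64_t)1 << obj_order) -
			segment_offset(img_offset, obj_order);

	return resid < room ? resid : room;
}

int main(void)
{
	/* 4 MB objects (order 22); a 6 MB request starting 1 MB in */
	uint64_t off = 1 << 20;
	uint64_t resid = 6 << 20;

	while (resid) {
		uint64_t len = segment_length(off, resid, 22);

		printf("object %llu: offset %llu length %llu\n",
		       (unsigned long long)(off >> 22),
		       (unsigned long long)segment_offset(off, 22),
		       (unsigned long long)len);
		off += len;
		resid -= len;
	}

	return 0;	/* prints two segments of 3 MB each */
}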
2113
0eefd470
AE
2114static void
2115rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2116{
2117 struct rbd_img_request *img_request;
2118 struct rbd_device *rbd_dev;
2119 u64 length;
2120 u32 page_count;
2121
2122 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2123 rbd_assert(obj_request_img_data_test(obj_request));
2124 img_request = obj_request->img_request;
2125 rbd_assert(img_request);
2126
2127 rbd_dev = img_request->rbd_dev;
2128 rbd_assert(rbd_dev);
2129 length = (u64)1 << rbd_dev->header.obj_order;
2130 page_count = (u32)calc_pages_for(0, length);
2131
2132 rbd_assert(obj_request->copyup_pages);
2133 ceph_release_page_vector(obj_request->copyup_pages, page_count);
2134 obj_request->copyup_pages = NULL;
2135
2136 /*
2137 * We want the transfer count to reflect the size of the
2138 * original write request. There is no such thing as a
2139 * successful short write, so if the request was successful
2140 * we can just set it to the originally-requested length.
2141 */
2142 if (!obj_request->result)
2143 obj_request->xferred = obj_request->length;
2144
2145 /* Finish up with the normal image object callback */
2146
2147 rbd_img_obj_callback(obj_request);
2148}
2149
3d7efd18
AE
2150static void
2151rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2152{
2153 struct rbd_obj_request *orig_request;
0eefd470
AE
2154 struct ceph_osd_request *osd_req;
2155 struct ceph_osd_client *osdc;
2156 struct rbd_device *rbd_dev;
3d7efd18 2157 struct page **pages;
3d7efd18
AE
2158 int result;
2159 u64 obj_size;
2160 u64 xferred;
2161
2162 rbd_assert(img_request_child_test(img_request));
2163
2164 /* First get what we need from the image request */
2165
2166 pages = img_request->copyup_pages;
2167 rbd_assert(pages != NULL);
2168 img_request->copyup_pages = NULL;
2169
2170 orig_request = img_request->obj_request;
2171 rbd_assert(orig_request != NULL);
0eefd470 2172 rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
3d7efd18
AE
2173 result = img_request->result;
2174 obj_size = img_request->length;
2175 xferred = img_request->xferred;
2176
0eefd470
AE
2177 rbd_dev = img_request->rbd_dev;
2178 rbd_assert(rbd_dev);
2179 rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
2180
3d7efd18
AE
2181 rbd_img_request_put(img_request);
2182
0eefd470
AE
2183 if (result)
2184 goto out_err;
2185
2186 /* Allocate the new copyup osd request for the original request */
2187
2188 result = -ENOMEM;
2189 rbd_assert(!orig_request->osd_req);
2190 osd_req = rbd_osd_req_create_copyup(orig_request);
2191 if (!osd_req)
2192 goto out_err;
2193 orig_request->osd_req = osd_req;
2194 orig_request->copyup_pages = pages;
3d7efd18 2195
0eefd470 2196 /* Initialize the copyup op */
3d7efd18 2197
0eefd470
AE
2198 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2199 osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
2200 false, false);
3d7efd18 2201
0eefd470
AE
2202 /* Then the original write request op */
2203
2204 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2205 orig_request->offset,
2206 orig_request->length, 0, 0);
2207 osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
2208 orig_request->length);
2209
2210 rbd_osd_req_format_write(orig_request);
2211
2212 /* All set, send it off. */
2213
2214 orig_request->callback = rbd_img_obj_copyup_callback;
2215 osdc = &rbd_dev->rbd_client->client->osdc;
2216 result = rbd_obj_request_submit(osdc, orig_request);
2217 if (!result)
2218 return;
2219out_err:
2220 /* Record the error code and complete the request */
2221
2222 orig_request->result = result;
2223 orig_request->xferred = 0;
2224 obj_request_done_set(orig_request);
2225 rbd_obj_request_complete(orig_request);
3d7efd18
AE
2226}
2227
2228/*
2229 * Read from the parent image the range of data that covers the
2230 * entire target of the given object request. This is used for
2231 * satisfying a layered image write request when the target of an
2232 * object request from the image request does not exist.
2233 *
2234 * A page array big enough to hold the returned data is allocated
2235 * and supplied to rbd_img_request_fill() as the "data descriptor."
2236 * When the read completes, this page array will be transferred to
2237 * the original object request for the copyup operation.
2238 *
2239 * If an error occurs, record it as the result of the original
2240 * object request and mark it done so it gets completed.
2241 */
2242static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2243{
2244 struct rbd_img_request *img_request = NULL;
2245 struct rbd_img_request *parent_request = NULL;
2246 struct rbd_device *rbd_dev;
2247 u64 img_offset;
2248 u64 length;
2249 struct page **pages = NULL;
2250 u32 page_count;
2251 int result;
2252
2253 rbd_assert(obj_request_img_data_test(obj_request));
2254 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2255
2256 img_request = obj_request->img_request;
2257 rbd_assert(img_request != NULL);
2258 rbd_dev = img_request->rbd_dev;
2259 rbd_assert(rbd_dev->parent != NULL);
2260
0eefd470
AE
2261 /*
2262 * First things first. The original osd request is of no
 2263 * use to us any more; we'll need a new one that can hold
2264 * the two ops in a copyup request. We'll get that later,
2265 * but for now we can release the old one.
2266 */
2267 rbd_osd_req_destroy(obj_request->osd_req);
2268 obj_request->osd_req = NULL;
2269
3d7efd18
AE
2270 /*
2271 * Determine the byte range covered by the object in the
2272 * child image to which the original request was to be sent.
2273 */
2274 img_offset = obj_request->img_offset - obj_request->offset;
2275 length = (u64)1 << rbd_dev->header.obj_order;
2276
a9e8ba2c
AE
2277 /*
2278 * There is no defined parent data beyond the parent
2279 * overlap, so limit what we read at that boundary if
2280 * necessary.
2281 */
2282 if (img_offset + length > rbd_dev->parent_overlap) {
2283 rbd_assert(img_offset < rbd_dev->parent_overlap);
2284 length = rbd_dev->parent_overlap - img_offset;
2285 }
2286
3d7efd18
AE
2287 /*
2288 * Allocate a page array big enough to receive the data read
2289 * from the parent.
2290 */
2291 page_count = (u32)calc_pages_for(0, length);
2292 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2293 if (IS_ERR(pages)) {
2294 result = PTR_ERR(pages);
2295 pages = NULL;
2296 goto out_err;
2297 }
2298
2299 result = -ENOMEM;
2300 parent_request = rbd_img_request_create(rbd_dev->parent,
2301 img_offset, length,
2302 false, true);
2303 if (!parent_request)
2304 goto out_err;
2305 rbd_obj_request_get(obj_request);
2306 parent_request->obj_request = obj_request;
2307
2308 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2309 if (result)
2310 goto out_err;
2311 parent_request->copyup_pages = pages;
2312
2313 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2314 result = rbd_img_request_submit(parent_request);
2315 if (!result)
2316 return 0;
2317
2318 parent_request->copyup_pages = NULL;
2319 parent_request->obj_request = NULL;
2320 rbd_obj_request_put(obj_request);
2321out_err:
2322 if (pages)
2323 ceph_release_page_vector(pages, page_count);
2324 if (parent_request)
2325 rbd_img_request_put(parent_request);
2326 obj_request->result = result;
2327 obj_request->xferred = 0;
2328 obj_request_done_set(obj_request);
2329
2330 return result;
2331}
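/*
 * A worked example of the overlap clamp above, with hypothetical
 * numbers: 4 MB objects and a parent_overlap of 10 MB. The object
 * covering image bytes [8 MB, 12 MB) has defined parent data only
 * for its first 2 MB, so the parent read is shortened accordingly.
 */
#include <stdint.h>

static uint64_t parent_read_length(uint64_t img_offset, uint64_t obj_size,
				   uint64_t parent_overlap)
{
	uint64_t length = obj_size;

	/* caller guarantees img_offset < parent_overlap */
	if (img_offset + length > parent_overlap)
		length = parent_overlap - img_offset;

	return length;	/* (8 MB, 4 MB, 10 MB) -> 2 MB */
}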
2332
c5b5ef6c
AE
2333static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2334{
c5b5ef6c
AE
2335 struct rbd_obj_request *orig_request;
2336 int result;
2337
2338 rbd_assert(!obj_request_img_data_test(obj_request));
2339
2340 /*
2341 * All we need from the object request is the original
2342 * request and the result of the STAT op. Grab those, then
2343 * we're done with the request.
2344 */
2345 orig_request = obj_request->obj_request;
2346 obj_request->obj_request = NULL;
2347 rbd_assert(orig_request);
2348 rbd_assert(orig_request->img_request);
2349
2350 result = obj_request->result;
2351 obj_request->result = 0;
2352
2353 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2354 obj_request, orig_request, result,
2355 obj_request->xferred, obj_request->length);
2356 rbd_obj_request_put(obj_request);
2357
2358 rbd_assert(orig_request);
2359 rbd_assert(orig_request->img_request);
c5b5ef6c
AE
2360
2361 /*
2362 * Our only purpose here is to determine whether the object
2363 * exists, and we don't want to treat the non-existence as
2364 * an error. If something else comes back, transfer the
2365 * error to the original request and complete it now.
2366 */
2367 if (!result) {
2368 obj_request_existence_set(orig_request, true);
2369 } else if (result == -ENOENT) {
2370 obj_request_existence_set(orig_request, false);
2371 } else if (result) {
2372 orig_request->result = result;
3d7efd18 2373 goto out;
c5b5ef6c
AE
2374 }
2375
2376 /*
2377 * Resubmit the original request now that we have recorded
2378 * whether the target object exists.
2379 */
b454e36d 2380 orig_request->result = rbd_img_obj_request_submit(orig_request);
3d7efd18 2381out:
c5b5ef6c
AE
2382 if (orig_request->result)
2383 rbd_obj_request_complete(orig_request);
2384 rbd_obj_request_put(orig_request);
2385}
2386
2387static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2388{
2389 struct rbd_obj_request *stat_request;
2390 struct rbd_device *rbd_dev;
2391 struct ceph_osd_client *osdc;
2392 struct page **pages = NULL;
2393 u32 page_count;
2394 size_t size;
2395 int ret;
2396
2397 /*
2398 * The response data for a STAT call consists of:
2399 * le64 length;
2400 * struct {
2401 * le32 tv_sec;
2402 * le32 tv_nsec;
2403 * } mtime;
2404 */
2405 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2406 page_count = (u32)calc_pages_for(0, size);
2407 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2408 if (IS_ERR(pages))
2409 return PTR_ERR(pages);
2410
2411 ret = -ENOMEM;
2412 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2413 OBJ_REQUEST_PAGES);
2414 if (!stat_request)
2415 goto out;
2416
2417 rbd_obj_request_get(obj_request);
2418 stat_request->obj_request = obj_request;
2419 stat_request->pages = pages;
2420 stat_request->page_count = page_count;
2421
2422 rbd_assert(obj_request->img_request);
2423 rbd_dev = obj_request->img_request->rbd_dev;
2424 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2425 stat_request);
2426 if (!stat_request->osd_req)
2427 goto out;
2428 stat_request->callback = rbd_img_obj_exists_callback;
2429
2430 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2431 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2432 false, false);
9d4df01f 2433 rbd_osd_req_format_read(stat_request);
c5b5ef6c
AE
2434
2435 osdc = &rbd_dev->rbd_client->client->osdc;
2436 ret = rbd_obj_request_submit(osdc, stat_request);
2437out:
2438 if (ret)
2439 rbd_obj_request_put(obj_request);
2440
2441 return ret;
2442}
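/*
 * A sketch of the 16-byte STAT reply described above as a user-space
 * struct (the struct name is invented; on the wire each field is
 * little-endian). Note that nothing here decodes the reply: only the
 * op result (0 vs -ENOENT) matters to rbd_img_obj_exists_callback().
 */
#include <stdint.h>

struct stat_reply {
	uint64_t length;	/* le64: object size in bytes */
	uint32_t tv_sec;	/* le32: mtime seconds */
	uint32_t tv_nsec;	/* le32: mtime nanoseconds */
} __attribute__ ((packed));	/* sizeof == 16, matching "size" above */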
2443
b454e36d
AE
2444static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2445{
2446 struct rbd_img_request *img_request;
a9e8ba2c 2447 struct rbd_device *rbd_dev;
3d7efd18 2448 bool known;
b454e36d
AE
2449
2450 rbd_assert(obj_request_img_data_test(obj_request));
2451
2452 img_request = obj_request->img_request;
2453 rbd_assert(img_request);
a9e8ba2c 2454 rbd_dev = img_request->rbd_dev;
b454e36d 2455
b454e36d 2456 /*
a9e8ba2c
AE
2457 * Only writes to layered images need special handling.
2458 * Reads and non-layered writes are simple object requests.
2459 * Layered writes that start beyond the end of the overlap
2460 * with the parent have no parent data, so they too are
2461 * simple object requests. Finally, if the target object is
2462 * known to already exist, its parent data has already been
2463 * copied, so a write to the object can also be handled as a
2464 * simple object request.
b454e36d
AE
2465 */
2466 if (!img_request_write_test(img_request) ||
2467 !img_request_layered_test(img_request) ||
a9e8ba2c 2468 rbd_dev->parent_overlap <= obj_request->img_offset ||
3d7efd18
AE
2469 ((known = obj_request_known_test(obj_request)) &&
2470 obj_request_exists_test(obj_request))) {
b454e36d
AE
2471
2472 struct rbd_device *rbd_dev;
2473 struct ceph_osd_client *osdc;
2474
2475 rbd_dev = obj_request->img_request->rbd_dev;
2476 osdc = &rbd_dev->rbd_client->client->osdc;
2477
2478 return rbd_obj_request_submit(osdc, obj_request);
2479 }
2480
2481 /*
3d7efd18
AE
2482 * It's a layered write. The target object might exist but
2483 * we may not know that yet. If we know it doesn't exist,
2484 * start by reading the data for the full target object from
2485 * the parent so we can use it for a copyup to the target.
b454e36d 2486 */
3d7efd18
AE
2487 if (known)
2488 return rbd_img_obj_parent_read_full(obj_request);
2489
2490 /* We don't know whether the target exists. Go find out. */
b454e36d
AE
2491
2492 return rbd_img_obj_exists_submit(obj_request);
2493}
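/*
 * A user-space restatement of the dispatch above (a sketch: the enum
 * and helper names are invented; the conditions mirror the code).
 */
#include <stdbool.h>
#include <stdint.h>

enum submit_path { SUBMIT_PLAIN, SUBMIT_COPYUP, SUBMIT_STAT_FIRST };

static enum submit_path choose_path(bool write, bool layered,
				    uint64_t img_offset, uint64_t overlap,
				    bool known, bool exists)
{
	if (!write || !layered || overlap <= img_offset ||
	    (known && exists))
		return SUBMIT_PLAIN;		/* ordinary osd request */
	if (known)				/* known not to exist */
		return SUBMIT_COPYUP;		/* parent read, then copyup */
	return SUBMIT_STAT_FIRST;		/* existence unknown: STAT */
}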
2494
bf0d5f50
AE
2495static int rbd_img_request_submit(struct rbd_img_request *img_request)
2496{
bf0d5f50 2497 struct rbd_obj_request *obj_request;
46faeed4 2498 struct rbd_obj_request *next_obj_request;
bf0d5f50 2499
37206ee5 2500 dout("%s: img %p\n", __func__, img_request);
46faeed4 2501 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
bf0d5f50
AE
2502 int ret;
2503
b454e36d 2504 ret = rbd_img_obj_request_submit(obj_request);
bf0d5f50
AE
2505 if (ret)
2506 return ret;
bf0d5f50
AE
2507 }
2508
2509 return 0;
2510}
8b3e1a56
AE
2511
2512static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2513{
2514 struct rbd_obj_request *obj_request;
a9e8ba2c
AE
2515 struct rbd_device *rbd_dev;
2516 u64 obj_end;
8b3e1a56
AE
2517
2518 rbd_assert(img_request_child_test(img_request));
2519
2520 obj_request = img_request->obj_request;
a9e8ba2c
AE
2521 rbd_assert(obj_request);
2522 rbd_assert(obj_request->img_request);
2523
8b3e1a56 2524 obj_request->result = img_request->result;
a9e8ba2c
AE
2525 if (obj_request->result)
2526 goto out;
2527
2528 /*
2529 * We need to zero anything beyond the parent overlap
2530 * boundary. Since rbd_img_obj_request_read_callback()
2531 * will zero anything beyond the end of a short read, an
2532 * easy way to do this is to pretend the data from the
2533 * parent came up short--ending at the overlap boundary.
2534 */
2535 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2536 obj_end = obj_request->img_offset + obj_request->length;
2537 rbd_dev = obj_request->img_request->rbd_dev;
2538 if (obj_end > rbd_dev->parent_overlap) {
2539 u64 xferred = 0;
2540
2541 if (obj_request->img_offset < rbd_dev->parent_overlap)
2542 xferred = rbd_dev->parent_overlap -
2543 obj_request->img_offset;
8b3e1a56 2544
a9e8ba2c
AE
2545 obj_request->xferred = min(img_request->xferred, xferred);
2546 } else {
2547 obj_request->xferred = img_request->xferred;
2548 }
2549out:
b5b09be3 2550 rbd_img_request_put(img_request);
8b3e1a56
AE
2551 rbd_img_obj_request_read_callback(obj_request);
2552 rbd_obj_request_complete(obj_request);
2553}
2554
2555static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2556{
2557 struct rbd_device *rbd_dev;
2558 struct rbd_img_request *img_request;
2559 int result;
2560
2561 rbd_assert(obj_request_img_data_test(obj_request));
2562 rbd_assert(obj_request->img_request != NULL);
2563 rbd_assert(obj_request->result == (s32) -ENOENT);
2564 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2565
2566 rbd_dev = obj_request->img_request->rbd_dev;
2567 rbd_assert(rbd_dev->parent != NULL);
2568 /* rbd_read_finish(obj_request, obj_request->length); */
2569 img_request = rbd_img_request_create(rbd_dev->parent,
2570 obj_request->img_offset,
2571 obj_request->length,
2572 false, true);
2573 result = -ENOMEM;
2574 if (!img_request)
2575 goto out_err;
2576
2577 rbd_obj_request_get(obj_request);
2578 img_request->obj_request = obj_request;
2579
f1a4739f
AE
2580 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2581 obj_request->bio_list);
8b3e1a56
AE
2582 if (result)
2583 goto out_err;
2584
2585 img_request->callback = rbd_img_parent_read_callback;
2586 result = rbd_img_request_submit(img_request);
2587 if (result)
2588 goto out_err;
2589
2590 return;
2591out_err:
2592 if (img_request)
2593 rbd_img_request_put(img_request);
2594 obj_request->result = result;
2595 obj_request->xferred = 0;
2596 obj_request_done_set(obj_request);
2597}
bf0d5f50 2598
cc4a38bd 2599static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
b8d70035
AE
2600{
2601 struct rbd_obj_request *obj_request;
2169238d 2602 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
b8d70035
AE
2603 int ret;
2604
2605 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2606 OBJ_REQUEST_NODATA);
2607 if (!obj_request)
2608 return -ENOMEM;
2609
2610 ret = -ENOMEM;
430c28c3 2611 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
b8d70035
AE
2612 if (!obj_request->osd_req)
2613 goto out;
2169238d 2614 obj_request->callback = rbd_obj_request_put;
b8d70035 2615
c99d2d4a 2616 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
cc4a38bd 2617 notify_id, 0, 0);
9d4df01f 2618 rbd_osd_req_format_read(obj_request);
430c28c3 2619
b8d70035 2620 ret = rbd_obj_request_submit(osdc, obj_request);
b8d70035 2621out:
cf81b60e
AE
2622 if (ret)
2623 rbd_obj_request_put(obj_request);
b8d70035
AE
2624
2625 return ret;
2626}
2627
2628static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2629{
2630 struct rbd_device *rbd_dev = (struct rbd_device *)data;
b8d70035
AE
2631
2632 if (!rbd_dev)
2633 return;
2634
37206ee5 2635 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
cc4a38bd
AE
2636 rbd_dev->header_name, (unsigned long long)notify_id,
2637 (unsigned int)opcode);
2638 (void)rbd_dev_refresh(rbd_dev);
b8d70035 2639
cc4a38bd 2640 rbd_obj_notify_ack(rbd_dev, notify_id);
b8d70035
AE
2641}
2642
9969ebc5
AE
2643/*
2644 * Request sync osd watch/unwatch. The value of "start" determines
2645 * whether a watch request is being initiated or torn down.
2646 */
2647static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2648{
2649 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2650 struct rbd_obj_request *obj_request;
9969ebc5
AE
2651 int ret;
2652
2653 rbd_assert(start ^ !!rbd_dev->watch_event);
2654 rbd_assert(start ^ !!rbd_dev->watch_request);
2655
2656 if (start) {
3c663bbd 2657 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
9969ebc5
AE
2658 &rbd_dev->watch_event);
2659 if (ret < 0)
2660 return ret;
8eb87565 2661 rbd_assert(rbd_dev->watch_event != NULL);
9969ebc5
AE
2662 }
2663
2664 ret = -ENOMEM;
2665 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2666 OBJ_REQUEST_NODATA);
2667 if (!obj_request)
2668 goto out_cancel;
2669
430c28c3
AE
2670 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2671 if (!obj_request->osd_req)
2672 goto out_cancel;
2673
8eb87565 2674 if (start)
975241af 2675 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
8eb87565 2676 else
6977c3f9 2677 ceph_osdc_unregister_linger_request(osdc,
975241af 2678 rbd_dev->watch_request->osd_req);
2169238d
AE
2679
2680 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
b21ebddd 2681 rbd_dev->watch_event->cookie, 0, start);
9d4df01f 2682 rbd_osd_req_format_write(obj_request);
2169238d 2683
9969ebc5
AE
2684 ret = rbd_obj_request_submit(osdc, obj_request);
2685 if (ret)
2686 goto out_cancel;
2687 ret = rbd_obj_request_wait(obj_request);
2688 if (ret)
2689 goto out_cancel;
9969ebc5
AE
2690 ret = obj_request->result;
2691 if (ret)
2692 goto out_cancel;
2693
8eb87565
AE
2694 /*
2695 * A watch request is set to linger, so the underlying osd
2696 * request won't go away until we unregister it. We retain
2697 * a pointer to the object request during that time (in
2698 * rbd_dev->watch_request), so we'll keep a reference to
2699 * it. We'll drop that reference (below) after we've
2700 * unregistered it.
2701 */
2702 if (start) {
2703 rbd_dev->watch_request = obj_request;
2704
2705 return 0;
2706 }
2707
2708 /* We have successfully torn down the watch request */
2709
2710 rbd_obj_request_put(rbd_dev->watch_request);
2711 rbd_dev->watch_request = NULL;
9969ebc5
AE
2712out_cancel:
2713 /* Cancel the event if we're tearing down, or on error */
2714 ceph_osdc_cancel_event(rbd_dev->watch_event);
2715 rbd_dev->watch_event = NULL;
9969ebc5
AE
2716 if (obj_request)
2717 rbd_obj_request_put(obj_request);
2718
2719 return ret;
2720}
2721
36be9a76 2722/*
f40eb349
AE
2723 * Synchronous osd object method call. Returns the number of bytes
2724 * returned in the outbound buffer, or a negative error code.
36be9a76
AE
2725 */
2726static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2727 const char *object_name,
2728 const char *class_name,
2729 const char *method_name,
4157976b 2730 const void *outbound,
36be9a76 2731 size_t outbound_size,
4157976b 2732 void *inbound,
e2a58ee5 2733 size_t inbound_size)
36be9a76 2734{
2169238d 2735 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
36be9a76 2736 struct rbd_obj_request *obj_request;
36be9a76
AE
2737 struct page **pages;
2738 u32 page_count;
2739 int ret;
2740
2741 /*
6010a451
AE
2742 * Method calls are ultimately read operations. The result
 2743 * should be placed into the inbound buffer provided. They
2744 * also supply outbound data--parameters for the object
2745 * method. Currently if this is present it will be a
2746 * snapshot id.
36be9a76 2747 */
57385b51 2748 page_count = (u32)calc_pages_for(0, inbound_size);
36be9a76
AE
2749 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2750 if (IS_ERR(pages))
2751 return PTR_ERR(pages);
2752
2753 ret = -ENOMEM;
6010a451 2754 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
36be9a76
AE
2755 OBJ_REQUEST_PAGES);
2756 if (!obj_request)
2757 goto out;
2758
2759 obj_request->pages = pages;
2760 obj_request->page_count = page_count;
2761
430c28c3 2762 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
36be9a76
AE
2763 if (!obj_request->osd_req)
2764 goto out;
2765
c99d2d4a 2766 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
04017e29
AE
2767 class_name, method_name);
2768 if (outbound_size) {
2769 struct ceph_pagelist *pagelist;
2770
2771 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2772 if (!pagelist)
2773 goto out;
2774
2775 ceph_pagelist_init(pagelist);
2776 ceph_pagelist_append(pagelist, outbound, outbound_size);
2777 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2778 pagelist);
2779 }
a4ce40a9
AE
2780 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2781 obj_request->pages, inbound_size,
44cd188d 2782 0, false, false);
9d4df01f 2783 rbd_osd_req_format_read(obj_request);
430c28c3 2784
36be9a76
AE
2785 ret = rbd_obj_request_submit(osdc, obj_request);
2786 if (ret)
2787 goto out;
2788 ret = rbd_obj_request_wait(obj_request);
2789 if (ret)
2790 goto out;
2791
2792 ret = obj_request->result;
2793 if (ret < 0)
2794 goto out;
57385b51
AE
2795
2796 rbd_assert(obj_request->xferred < (u64)INT_MAX);
2797 ret = (int)obj_request->xferred;
903bb32e 2798 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
36be9a76
AE
2799out:
2800 if (obj_request)
2801 rbd_obj_request_put(obj_request);
2802 else
2803 ceph_release_page_vector(pages, page_count);
2804
2805 return ret;
2806}
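/*
 * Usage sketch, mirroring the "get_size" caller near the end of this
 * file: parameters go out as packed little-endian data, the reply
 * lands in a packed buffer, and a non-negative return value is the
 * number of reply bytes received.
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				"rbd", "get_size",
 *				&snapid, sizeof (snapid),
 *				&size_buf, sizeof (size_buf));
 */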
2807
bf0d5f50 2808static void rbd_request_fn(struct request_queue *q)
cc344fa1 2809 __releases(q->queue_lock) __acquires(q->queue_lock)
bf0d5f50
AE
2810{
2811 struct rbd_device *rbd_dev = q->queuedata;
2812 bool read_only = rbd_dev->mapping.read_only;
2813 struct request *rq;
2814 int result;
2815
2816 while ((rq = blk_fetch_request(q))) {
2817 bool write_request = rq_data_dir(rq) == WRITE;
2818 struct rbd_img_request *img_request;
2819 u64 offset;
2820 u64 length;
2821
2822 /* Ignore any non-FS requests that filter through. */
2823
2824 if (rq->cmd_type != REQ_TYPE_FS) {
4dda41d3
AE
2825 dout("%s: non-fs request type %d\n", __func__,
2826 (int) rq->cmd_type);
2827 __blk_end_request_all(rq, 0);
2828 continue;
2829 }
2830
2831 /* Ignore/skip any zero-length requests */
2832
2833 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2834 length = (u64) blk_rq_bytes(rq);
2835
2836 if (!length) {
2837 dout("%s: zero-length request\n", __func__);
bf0d5f50
AE
2838 __blk_end_request_all(rq, 0);
2839 continue;
2840 }
2841
2842 spin_unlock_irq(q->queue_lock);
2843
2844 /* Disallow writes to a read-only device */
2845
2846 if (write_request) {
2847 result = -EROFS;
2848 if (read_only)
2849 goto end_request;
2850 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
2851 }
2852
6d292906
AE
2853 /*
2854 * Quit early if the mapped snapshot no longer
2855 * exists. It's still possible the snapshot will
2856 * have disappeared by the time our request arrives
2857 * at the osd, but there's no sense in sending it if
2858 * we already know.
2859 */
2860 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
bf0d5f50
AE
2861 dout("request for non-existent snapshot");
2862 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2863 result = -ENXIO;
2864 goto end_request;
2865 }
2866
bf0d5f50 2867 result = -EINVAL;
c0cd10db
AE
2868 if (offset && length > U64_MAX - offset + 1) {
2869 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
2870 offset, length);
bf0d5f50 2871 goto end_request; /* Shouldn't happen */
c0cd10db 2872 }
bf0d5f50
AE
2873
2874 result = -ENOMEM;
2875 img_request = rbd_img_request_create(rbd_dev, offset, length,
9849e986 2876 write_request, false);
bf0d5f50
AE
2877 if (!img_request)
2878 goto end_request;
2879
2880 img_request->rq = rq;
2881
f1a4739f
AE
2882 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2883 rq->bio);
bf0d5f50
AE
2884 if (!result)
2885 result = rbd_img_request_submit(img_request);
2886 if (result)
2887 rbd_img_request_put(img_request);
2888end_request:
2889 spin_lock_irq(q->queue_lock);
2890 if (result < 0) {
7da22d29
AE
2891 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
2892 write_request ? "write" : "read",
2893 length, offset, result);
2894
bf0d5f50
AE
2895 __blk_end_request_all(rq, result);
2896 }
2897 }
2898}
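/*
 * Per-request flow of the function above, in brief:
 *
 *	rbd_img_request_create(rbd_dev, offset, length, write_request, false)
 *	  -> rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, rq->bio)
 *	  -> rbd_img_request_submit(img_request)
 *
 * Any failure drops the image request and completes the block
 * request via __blk_end_request_all(rq, result).
 */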
2899
602adf40
YS
2900/*
 2901 * A queue callback. Makes sure that we don't create a bio that spans
 2902 * multiple osd objects. One exception would be a single-page bio,
 2903 * which we handle later, in bio_chain_clone_range().
602adf40
YS
2904 */
2905static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2906 struct bio_vec *bvec)
2907{
2908 struct rbd_device *rbd_dev = q->queuedata;
e5cfeed2
AE
2909 sector_t sector_offset;
2910 sector_t sectors_per_obj;
2911 sector_t obj_sector_offset;
2912 int ret;
2913
2914 /*
 2915 * Find how far into its rbd object the bio's start sector
 2916 * falls. The bio's sector is partition-relative, so make it
 2917 * relative to the enclosing device first.
2918 */
2919 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2920 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2921 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2922
2923 /*
2924 * Compute the number of bytes from that offset to the end
2925 * of the object. Account for what's already used by the bio.
2926 */
2927 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2928 if (ret > bmd->bi_size)
2929 ret -= bmd->bi_size;
2930 else
2931 ret = 0;
2932
2933 /*
2934 * Don't send back more than was asked for. And if the bio
2935 * was empty, let the whole thing through because: "Note
2936 * that a block device *must* allow a single page to be
2937 * added to an empty bio."
2938 */
2939 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2940 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2941 ret = (int) bvec->bv_len;
2942
2943 return ret;
602adf40
YS
2944}
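/*
 * A user-space sketch of the arithmetic above with hypothetical
 * numbers (the bv_len cap and the empty-bio special case are left
 * out). Order-22 (4 MB) objects hold 8192 sectors each.
 */
#include <stdint.h>

static int bytes_left_in_object(uint64_t dev_sector,
				unsigned int obj_order,
				unsigned int bio_size)
{
	uint64_t sectors_per_obj = 1ULL << (obj_order - 9);
	uint64_t obj_sector_offset = dev_sector & (sectors_per_obj - 1);
	int ret = (int)((sectors_per_obj - obj_sector_offset) << 9);

	return ret > (int)bio_size ? ret - (int)bio_size : 0;
}
/* e.g. sector 8000, empty bio: (8192 - 8000) * 512 = 98304 bytes */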
2945
2946static void rbd_free_disk(struct rbd_device *rbd_dev)
2947{
2948 struct gendisk *disk = rbd_dev->disk;
2949
2950 if (!disk)
2951 return;
2952
a0cab924
AE
2953 rbd_dev->disk = NULL;
2954 if (disk->flags & GENHD_FL_UP) {
602adf40 2955 del_gendisk(disk);
a0cab924
AE
2956 if (disk->queue)
2957 blk_cleanup_queue(disk->queue);
2958 }
602adf40
YS
2959 put_disk(disk);
2960}
2961
788e2df3
AE
2962static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2963 const char *object_name,
7097f8df 2964 u64 offset, u64 length, void *buf)
788e2df3
AE
2965
2966{
2169238d 2967 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
788e2df3 2968 struct rbd_obj_request *obj_request;
788e2df3
AE
2969 struct page **pages = NULL;
2970 u32 page_count;
1ceae7ef 2971 size_t size;
788e2df3
AE
2972 int ret;
2973
2974 page_count = (u32) calc_pages_for(offset, length);
2975 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2976 if (IS_ERR(pages))
 2977 return PTR_ERR(pages);
2978
2979 ret = -ENOMEM;
2980 obj_request = rbd_obj_request_create(object_name, offset, length,
36be9a76 2981 OBJ_REQUEST_PAGES);
788e2df3
AE
2982 if (!obj_request)
2983 goto out;
2984
2985 obj_request->pages = pages;
2986 obj_request->page_count = page_count;
2987
430c28c3 2988 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
788e2df3
AE
2989 if (!obj_request->osd_req)
2990 goto out;
2991
c99d2d4a
AE
2992 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
2993 offset, length, 0, 0);
406e2c9f 2994 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
a4ce40a9 2995 obj_request->pages,
44cd188d
AE
2996 obj_request->length,
2997 obj_request->offset & ~PAGE_MASK,
2998 false, false);
9d4df01f 2999 rbd_osd_req_format_read(obj_request);
430c28c3 3000
788e2df3
AE
3001 ret = rbd_obj_request_submit(osdc, obj_request);
3002 if (ret)
3003 goto out;
3004 ret = rbd_obj_request_wait(obj_request);
3005 if (ret)
3006 goto out;
3007
3008 ret = obj_request->result;
3009 if (ret < 0)
3010 goto out;
1ceae7ef
AE
3011
3012 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3013 size = (size_t) obj_request->xferred;
903bb32e 3014 ceph_copy_from_page_vector(pages, buf, 0, size);
7097f8df
AE
3015 rbd_assert(size <= (size_t)INT_MAX);
3016 ret = (int)size;
788e2df3
AE
3017out:
3018 if (obj_request)
3019 rbd_obj_request_put(obj_request);
3020 else
3021 ceph_release_page_vector(pages, page_count);
3022
3023 return ret;
3024}
3025
602adf40 3026/*
4156d998
AE
3027 * Read the complete header for the given rbd device.
3028 *
3029 * Returns a pointer to a dynamically-allocated buffer containing
 3030 * the complete and validated header.
3033 *
3034 * Returns a pointer-coded errno if a failure occurs.
602adf40 3035 */
4156d998 3036static struct rbd_image_header_ondisk *
7097f8df 3037rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
602adf40 3038{
4156d998 3039 struct rbd_image_header_ondisk *ondisk = NULL;
50f7c4c9 3040 u32 snap_count = 0;
4156d998
AE
3041 u64 names_size = 0;
3042 u32 want_count;
3043 int ret;
602adf40 3044
00f1f36f 3045 /*
4156d998
AE
3046 * The complete header will include an array of its 64-bit
3047 * snapshot ids, followed by the names of those snapshots as
3048 * a contiguous block of NUL-terminated strings. Note that
3049 * the number of snapshots could change by the time we read
3050 * it in, in which case we re-read it.
00f1f36f 3051 */
4156d998
AE
3052 do {
3053 size_t size;
3054
3055 kfree(ondisk);
3056
3057 size = sizeof (*ondisk);
3058 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3059 size += names_size;
3060 ondisk = kmalloc(size, GFP_KERNEL);
3061 if (!ondisk)
3062 return ERR_PTR(-ENOMEM);
3063
788e2df3 3064 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
7097f8df 3065 0, size, ondisk);
4156d998
AE
3066 if (ret < 0)
3067 goto out_err;
c0cd10db 3068 if ((size_t)ret < size) {
4156d998 3069 ret = -ENXIO;
06ecc6cb
AE
3070 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3071 size, ret);
4156d998
AE
3072 goto out_err;
3073 }
3074 if (!rbd_dev_ondisk_valid(ondisk)) {
3075 ret = -ENXIO;
06ecc6cb 3076 rbd_warn(rbd_dev, "invalid header");
4156d998 3077 goto out_err;
81e759fb 3078 }
602adf40 3079
4156d998
AE
3080 names_size = le64_to_cpu(ondisk->snap_names_len);
3081 want_count = snap_count;
3082 snap_count = le32_to_cpu(ondisk->snap_count);
3083 } while (snap_count != want_count);
00f1f36f 3084
4156d998 3085 return ondisk;
00f1f36f 3086
4156d998
AE
3087out_err:
3088 kfree(ondisk);
3089
3090 return ERR_PTR(ret);
3091}
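/*
 * Buffer sizing in the loop above, spelled out (a sketch with made-up
 * numbers): with three snapshots whose names are "a", "bb" and "ccc":
 *
 *	names_size = 2 + 3 + 4 = 9	(NUL-terminated, back to back)
 *	size = sizeof (*ondisk)
 *	     + 3 * sizeof (struct rbd_image_snap_ondisk)
 *	     + 9
 *
 * Both counts come from the previous read, so a snapshot created in
 * between makes snap_count != want_count and the loop re-reads with
 * the larger values.
 */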
3092
3093/*
 3094 * Reload the on-disk header.
3095 */
3096static int rbd_read_header(struct rbd_device *rbd_dev,
3097 struct rbd_image_header *header)
3098{
3099 struct rbd_image_header_ondisk *ondisk;
4156d998 3100 int ret;
602adf40 3101
7097f8df 3102 ondisk = rbd_dev_v1_header_read(rbd_dev);
4156d998
AE
3103 if (IS_ERR(ondisk))
3104 return PTR_ERR(ondisk);
3105 ret = rbd_header_from_disk(header, ondisk);
4156d998
AE
3106 kfree(ondisk);
3107
3108 return ret;
602adf40
YS
3109}
3110
9478554a
AE
3111static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
3112{
0d7dbfce 3113 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
9478554a
AE
3114 return;
3115
e28626a0
AE
3116 if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
3117 sector_t size;
3118
3119 rbd_dev->mapping.size = rbd_dev->header.image_size;
3120 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3121 dout("setting size to %llu sectors", (unsigned long long)size);
3122 set_capacity(rbd_dev->disk, size);
3123 }
9478554a
AE
3124}
3125
602adf40
YS
3126/*
 3127 * Re-read the v1 on-disk header and update the in-core copy.
3128 */
cc4a38bd 3129static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
602adf40
YS
3130{
3131 int ret;
3132 struct rbd_image_header h;
602adf40
YS
3133
3134 ret = rbd_read_header(rbd_dev, &h);
3135 if (ret < 0)
3136 return ret;
3137
a51aa0c0
JD
3138 down_write(&rbd_dev->header_rwsem);
3139
9478554a
AE
3140 /* Update image size, and check for resize of mapped image */
3141 rbd_dev->header.image_size = h.image_size;
3142 rbd_update_mapping_size(rbd_dev);
9db4b3e3 3143
849b4260 3144 /* rbd_dev->header.object_prefix shouldn't change */
602adf40 3145 kfree(rbd_dev->header.snap_sizes);
849b4260 3146 kfree(rbd_dev->header.snap_names);
d1d25646 3147 /* osd requests may still refer to snapc */
812164f8 3148 ceph_put_snap_context(rbd_dev->header.snapc);
602adf40 3149
93a24e08 3150 rbd_dev->header.image_size = h.image_size;
602adf40
YS
3151 rbd_dev->header.snapc = h.snapc;
3152 rbd_dev->header.snap_names = h.snap_names;
3153 rbd_dev->header.snap_sizes = h.snap_sizes;
849b4260 3154 /* Free the extra copy of the object prefix */
c0cd10db
AE
3155 if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
3156 rbd_warn(rbd_dev, "object prefix changed (ignoring)");
849b4260
AE
3157 kfree(h.object_prefix);
3158
c666601a 3159 up_write(&rbd_dev->header_rwsem);
602adf40 3160
dfc5606d 3161 return ret;
602adf40
YS
3162}
3163
15228ede
AE
3164/*
3165 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3166 * has disappeared from the (just updated) snapshot context.
3167 */
3168static void rbd_exists_validate(struct rbd_device *rbd_dev)
3169{
3170 u64 snap_id;
3171
3172 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3173 return;
3174
3175 snap_id = rbd_dev->spec->snap_id;
3176 if (snap_id == CEPH_NOSNAP)
3177 return;
3178
3179 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3180 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3181}
3182
cc4a38bd 3183static int rbd_dev_refresh(struct rbd_device *rbd_dev)
1fe5e993 3184{
a3fbe5d4 3185 u64 image_size;
1fe5e993
AE
3186 int ret;
3187
117973fb 3188 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
a3fbe5d4 3189 image_size = rbd_dev->header.image_size;
1fe5e993 3190 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
117973fb 3191 if (rbd_dev->image_format == 1)
cc4a38bd 3192 ret = rbd_dev_v1_refresh(rbd_dev);
117973fb 3193 else
cc4a38bd 3194 ret = rbd_dev_v2_refresh(rbd_dev);
15228ede
AE
3195
3196 /* If it's a mapped snapshot, validate its EXISTS flag */
3197
3198 rbd_exists_validate(rbd_dev);
1fe5e993 3199 mutex_unlock(&ctl_mutex);
522a0cc0
AE
3200 if (ret)
3201 rbd_warn(rbd_dev, "got notification but failed to "
3202 " update snaps: %d\n", ret);
a3fbe5d4
AE
3203 if (image_size != rbd_dev->header.image_size)
3204 revalidate_disk(rbd_dev->disk);
1fe5e993
AE
3205
3206 return ret;
3207}
3208
602adf40
YS
3209static int rbd_init_disk(struct rbd_device *rbd_dev)
3210{
3211 struct gendisk *disk;
3212 struct request_queue *q;
593a9e7b 3213 u64 segment_size;
602adf40 3214
602adf40 3215 /* create gendisk info */
602adf40
YS
3216 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3217 if (!disk)
1fcdb8aa 3218 return -ENOMEM;
602adf40 3219
f0f8cef5 3220 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
de71a297 3221 rbd_dev->dev_id);
602adf40
YS
3222 disk->major = rbd_dev->major;
3223 disk->first_minor = 0;
3224 disk->fops = &rbd_bd_ops;
3225 disk->private_data = rbd_dev;
3226
bf0d5f50 3227 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
602adf40
YS
3228 if (!q)
3229 goto out_disk;
029bcbd8 3230
593a9e7b
AE
3231 /* We use the default size, but let's be explicit about it. */
3232 blk_queue_physical_block_size(q, SECTOR_SIZE);
3233
029bcbd8 3234 /* set io sizes to object size */
593a9e7b
AE
3235 segment_size = rbd_obj_bytes(&rbd_dev->header);
3236 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3237 blk_queue_max_segment_size(q, segment_size);
3238 blk_queue_io_min(q, segment_size);
3239 blk_queue_io_opt(q, segment_size);
029bcbd8 3240
602adf40
YS
3241 blk_queue_merge_bvec(q, rbd_merge_bvec);
3242 disk->queue = q;
3243
3244 q->queuedata = rbd_dev;
3245
3246 rbd_dev->disk = disk;
602adf40 3247
602adf40 3248 return 0;
602adf40
YS
3249out_disk:
3250 put_disk(disk);
1fcdb8aa
AE
3251
3252 return -ENOMEM;
602adf40
YS
3253}
3254
dfc5606d
YS
3255/*
3256 sysfs
3257*/
3258
593a9e7b
AE
3259static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3260{
3261 return container_of(dev, struct rbd_device, dev);
3262}
3263
dfc5606d
YS
3264static ssize_t rbd_size_show(struct device *dev,
3265 struct device_attribute *attr, char *buf)
3266{
593a9e7b 3267 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
a51aa0c0 3268
fc71d833
AE
3269 return sprintf(buf, "%llu\n",
3270 (unsigned long long)rbd_dev->mapping.size);
dfc5606d
YS
3271}
3272
34b13184
AE
3273/*
3274 * Note this shows the features for whatever's mapped, which is not
3275 * necessarily the base image.
3276 */
3277static ssize_t rbd_features_show(struct device *dev,
3278 struct device_attribute *attr, char *buf)
3279{
3280 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3281
3282 return sprintf(buf, "0x%016llx\n",
fc71d833 3283 (unsigned long long)rbd_dev->mapping.features);
34b13184
AE
3284}
3285
dfc5606d
YS
3286static ssize_t rbd_major_show(struct device *dev,
3287 struct device_attribute *attr, char *buf)
3288{
593a9e7b 3289 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf40 3290
fc71d833
AE
3291 if (rbd_dev->major)
3292 return sprintf(buf, "%d\n", rbd_dev->major);
3293
3294 return sprintf(buf, "(none)\n");
3295
dfc5606d
YS
3296}
3297
3298static ssize_t rbd_client_id_show(struct device *dev,
3299 struct device_attribute *attr, char *buf)
602adf40 3300{
593a9e7b 3301 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3302
1dbb4399
AE
3303 return sprintf(buf, "client%lld\n",
3304 ceph_client_id(rbd_dev->rbd_client->client));
602adf40
YS
3305}
3306
dfc5606d
YS
3307static ssize_t rbd_pool_show(struct device *dev,
3308 struct device_attribute *attr, char *buf)
602adf40 3309{
593a9e7b 3310 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3311
0d7dbfce 3312 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
dfc5606d
YS
3313}
3314
9bb2f334
AE
3315static ssize_t rbd_pool_id_show(struct device *dev,
3316 struct device_attribute *attr, char *buf)
3317{
3318 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3319
0d7dbfce 3320 return sprintf(buf, "%llu\n",
fc71d833 3321 (unsigned long long) rbd_dev->spec->pool_id);
9bb2f334
AE
3322}
3323
dfc5606d
YS
3324static ssize_t rbd_name_show(struct device *dev,
3325 struct device_attribute *attr, char *buf)
3326{
593a9e7b 3327 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3328
a92ffdf8
AE
3329 if (rbd_dev->spec->image_name)
3330 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3331
3332 return sprintf(buf, "(unknown)\n");
dfc5606d
YS
3333}
3334
589d30e0
AE
3335static ssize_t rbd_image_id_show(struct device *dev,
3336 struct device_attribute *attr, char *buf)
3337{
3338 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3339
0d7dbfce 3340 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
589d30e0
AE
3341}
3342
34b13184
AE
3343/*
3344 * Shows the name of the currently-mapped snapshot (or
3345 * RBD_SNAP_HEAD_NAME for the base image).
3346 */
dfc5606d
YS
3347static ssize_t rbd_snap_show(struct device *dev,
3348 struct device_attribute *attr,
3349 char *buf)
3350{
593a9e7b 3351 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3352
0d7dbfce 3353 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
dfc5606d
YS
3354}
3355
86b00e0d
AE
3356/*
3357 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3358 * for the parent image. If there is no parent, simply shows
3359 * "(no parent image)".
3360 */
3361static ssize_t rbd_parent_show(struct device *dev,
3362 struct device_attribute *attr,
3363 char *buf)
3364{
3365 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3366 struct rbd_spec *spec = rbd_dev->parent_spec;
3367 int count;
3368 char *bufp = buf;
3369
3370 if (!spec)
3371 return sprintf(buf, "(no parent image)\n");
3372
3373 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3374 (unsigned long long) spec->pool_id, spec->pool_name);
3375 if (count < 0)
3376 return count;
3377 bufp += count;
3378
3379 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3380 spec->image_name ? spec->image_name : "(unknown)");
3381 if (count < 0)
3382 return count;
3383 bufp += count;
3384
3385 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3386 (unsigned long long) spec->snap_id, spec->snap_name);
3387 if (count < 0)
3388 return count;
3389 bufp += count;
3390
3391 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3392 if (count < 0)
3393 return count;
3394 bufp += count;
3395
3396 return (ssize_t) (bufp - buf);
3397}
3398
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);

	return ret < 0 ? ret : size;
}

static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};

static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
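
/*
 * A sketch of the rbd_spec reference-counting discipline used in the
 * rest of this file:
 *
 *	spec = rbd_spec_alloc();	kref starts at 1
 *	rbd_spec_get(spec);		taken whenever another holder
 *					(e.g. a clone's child) shares it
 *	rbd_spec_put(spec);		the final put calls rbd_spec_free()
 */
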
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
				struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	rbd_dev->flags = 0;
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->spec = spec;
	rbd_dev->rbd_client = rbdc;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order)
		*order = size_buf.order;
	*snap_size = le64_to_cpu(size_buf.size);

	/* Print the order from the reply buffer; "order" may be NULL */
	dout("  snap_id 0x%016llx order = %u, snap_size = %llu\n",
		(unsigned long long)snap_id, (unsigned int)size_buf.order,
		(unsigned long long)*snap_size);

	return 0;
}

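/*
 * Worked example of the order/size pair fetched above (values are
 * illustrative only): with an object order of 22, backing objects
 * are (1 << 22) = 4 MiB each, so a 1 GiB image spans 256 objects.
 */
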
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)incompat);

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

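/*
 * Example of the incompat check above (hypothetical image): a format
 * 2 image created with layering only reports features = incompat =
 * 0x1, which passes because RBD_FEATURE_LAYERING is part of
 * RBD_FEATURES_SUPPORTED; any incompat bit outside that mask makes
 * the probe fail with -ENXIO rather than mapping the image
 * incorrectly.
 */
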
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	char *image_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(CEPH_NOSNAP);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
	if (parent_spec->pool_id == CEPH_NOPOOL)
		goto out;	/* No parent?  No problem. */

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (parent_spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
			(unsigned long long)parent_spec->pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	parent_spec->image_id = image_id;
	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	rbd_dev->parent_overlap = overlap;
	rbd_dev->parent_spec = parent_spec;
	parent_spec = NULL;	/* rbd_dev now owns this */
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

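/*
 * A note on "overlap" as decoded above: it is the byte count, from
 * offset 0, within which the clone still mirrors its parent.  A read
 * below the overlap that hits an object not yet written in the clone
 * is satisfied from the parent image; beyond the overlap, missing
 * object data reads back as zeros.
 */
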
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

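/*
 * Illustrative layout behind the loop above: format 1 snapshot names
 * are packed as consecutive NUL-terminated strings parallel to
 * snapc->snaps[], so two snapshots "a" and "snap2" (hypothetical
 * names) are stored as "a\0snap2\0" with their ids in snaps[0] and
 * snaps[1].
 */
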
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name))
			break;
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}

/*
 * When an rbd image has a parent image, it is identified by the
 * pool, image, and snapshot ids (not names).  This function fills
 * in the names for those ids.  (It's OK if we can't figure out the
 * name for an image id, but the pool and snapshot ids should always
 * exist and have names.)  All names in an rbd spec are dynamically
 * allocated.
 *
 * When an image being mapped (not a parent) is probed, we have the
 * pool name and pool id, image name and image id, and the snapshot
 * name.  The only thing we're missing is the snapshot id.
 */
static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	/*
	 * An image being mapped will have the pool name (etc.), but
	 * we need to look up the snapshot id.
	 */
	if (spec->pool_name) {
		if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
			u64 snap_id;

			snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
			if (snap_id == CEPH_NOSNAP)
				return -ENOENT;
			spec->snap_id = snap_id;
		} else {
			spec->snap_id = CEPH_NOSNAP;
		}

		return 0;
	}

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Look up the snapshot name, and make a copy */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (!snap_name) {
		ret = -ENOMEM;
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;
out_err:
	kfree(image_name);
	kfree(pool_name);

	return ret;
}

static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}

static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
{
	int ret;

	down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out;
	rbd_update_mapping_size(rbd_dev);

	ret = rbd_dev_v2_snap_context(rbd_dev);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);
out:
	up_write(&rbd_dev->header_rwsem);

	return ret;
}

static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	mutex_unlock(&ctl_mutex);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}

static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.  The minimum rbd id is 1.
 */
static void rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	struct list_head *tmp;
	int rbd_id = rbd_dev->dev_id;
	int max_id;

	rbd_assert(rbd_id > 0);

	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);

	/*
	 * If the id being "put" is not the current maximum, there
	 * is nothing special we need to do.
	 */
	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
		spin_unlock(&rbd_dev_list_lock);
		return;
	}

	/*
	 * We need to update the current maximum id.  Search the
	 * list to find out what it is.  We're more likely to find
	 * the maximum at the end, so search the list backward.
	 */
	max_id = 0;
	list_for_each_prev(tmp, &rbd_dev_list) {
		struct rbd_device *rbd_dev;

		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id > max_id)
			max_id = rbd_dev->dev_id;
	}
	spin_unlock(&rbd_dev_list_lock);

	/*
	 * The max id could have been updated by rbd_dev_id_get(), in
	 * which case it now accurately reflects the new maximum.
	 * Be careful not to overwrite the maximum value in that
	 * case.
	 */
	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
	dout("  max dev id has been reset\n");
}

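/*
 * Example of the race the cmpxchg above tolerates: with ids 1-3 in
 * use, putting id 3 computes max_id = 2; if rbd_dev_id_get() has
 * meanwhile bumped rbd_dev_id_max to 4, the cmpxchg (which expects
 * to find 3) fails, leaving the newer, correct maximum in place.
 */
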
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

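/*
 * Tokenizer walk-through (illustrative): with buf pointing at
 * "  rbd  foo", next_token() advances buf to "rbd  foo" and returns
 * 3; dup_token() would then hand back a freshly-allocated "rbd" and
 * leave buf pointing at "  foo".
 */
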
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_name>
 *      An optional snapshot name.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot name is
 *      provided.  Snapshot mappings are always read-only.
 */
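
/*
 * For example (all values hypothetical), a mapping request in the
 * form described above might be written as:
 *
 *	echo "1.2.3.4:6789 name=admin,secret=<key> mypool myimage" \
 *		> /sys/bus/rbd/add
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for the authoritative
 * interface description.
 */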
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret < 0) {
		goto out;	/* propagate any other error unchanged */
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}

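/*
 * Naming example (image name and id hypothetical): probing image
 * "foo" reads the RBD_ID_PREFIX + name object ("rbd_id.foo",
 * assuming the usual prefix value), and a reply of "10216b8b4567"
 * becomes the image id from which all other per-image object names
 * are built.
 */
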
/* Undo whatever state changes are made by v1 or v2 image probe */

static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/* Populate rbd image metadata */

	ret = rbd_read_header(rbd_dev, &rbd_dev->header);
	if (ret < 0)
		goto out_err;

	/* Version 1 images have no parent (no layering) */

	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	dout("discovered version 1 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}

static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out_err;

	/* Get the object prefix (a.k.a. block_name) for the image */

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/* Get and check the features for the image */

	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports layering, get the parent info */

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out_err;

		/*
		 * Don't print a warning for parent images.  We can
		 * tell at this point because we won't know its pool
		 * name yet (just its pool id).
		 */
		if (rbd_dev->spec->pool_name)
			rbd_warn(rbd_dev, "WARNING: kernel layering "
					"is EXPERIMENTAL!");
	}

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* crypto and compression type aren't (yet) supported for v2 images */

	rbd_dev->header.crypt_type = 0;
	rbd_dev->header.comp_type = 0;

	/* Get the snapshot context */

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret)
		goto out_err;

	dout("discovered version 2 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;
out_err:
	rbd_dev->parent_overlap = 0;
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;

	return 0;
out_err:
	if (parent) {
		rbd_spec_put(rbd_dev->parent_spec);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}

static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		return ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/* Everything's ready.  Announce the disk to the world. */

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}

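/*
 * Resulting header object names (image name and id hypothetical):
 * format 1 appends RBD_SUFFIX to the image name, e.g. "foo.rbd",
 * while format 2 prepends RBD_HEADER_PREFIX to the image id, e.g.
 * "rbd_header.10216b8b4567" (assuming the usual prefix value).
 */
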
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	int ret;

	rbd_dev_unprobe(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto out_header_name;

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (!ret)
		return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (tmp)
		rbd_warn(rbd_dev, "unable to tear down watch request\n");
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}

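/*
 * Probe and teardown ordering at a glance: image id -> header name
 * -> watch registration -> v1/v2 header probe -> spec name update ->
 * parent probe.  Each error label above unwinds exactly the steps
 * that had completed, in reverse order.
 */
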
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev) {
		rc = -ENOMEM;	/* rc still holds the pool id here */
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_image_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	rc = rbd_dev_device_setup(rbd_dev);
	if (!rc)
		return count;

	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}

static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}

static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_clear_mapping(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

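/*
 * Example: for a clone chain dev -> p1 -> p2, the inner loop walks
 * to first = p1, second = p2 (the eldest ancestor with no parent of
 * its own), releases p2, and the outer loop then repeats until dev
 * itself is parentless.
 */
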
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;
	ret = count;
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
done:
	mutex_unlock(&ctl_mutex);

	return ret;
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");

		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");