rbd: have rbd_dev_image_id() set format 1 image id
[linux-2.6-block.git] / drivers / block / rbd.c
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

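/*
 * Worked example (editor's note, arithmetic only): MAX_INT_FORMAT_WIDTH
 * over-approximates the decimal width of an int.  With a 4-byte int,
 * (5 * 4) / 2 + 1 = 11 characters, which covers the longest value,
 * "-2147483648", so "rbd" plus any device id fits in DEV_NAME_LEN (32).
 */
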
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These fields never change for a given rbd image */
	char *object_prefix;
	u64 features;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;
	u64 *snap_sizes;

	u64 obj_version;
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	char		*pool_name;

	char		*image_id;
	char		*image_name;

	u64		snap_id;
	char		*snap_name;

	struct kref	kref;
};
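
/*
 * Example (editor's sketch, values illustrative): mapping image "foo"
 * in pool "rbd" at its head revision might populate a spec roughly as
 * { pool_id = 2, pool_name = "rbd", image_id = "1014.74b0dc51",
 *   image_name = "foo", snap_id = CEPH_NOSNAP, snap_name = "-" },
 * where "-" is RBD_SNAP_HEAD_NAME.
 */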

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	u64			version;
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

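/*
 * Summary of the three object-request roles described in the struct
 * comment above (editor's note, derived from that comment):
 *
 *	role				which		pointer
 *	standalone request		BAD_WHICH	obj_request == NULL
 *	existence check (layered)	BAD_WHICH	obj_request != NULL
 *	image data request		0..count-1	img_request != NULL
 */
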
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_snap {
	const char		*name;
	u64			size;
	struct list_head	node;
	u64			id;
	u64			features;
};

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	struct rbd_device	*parent;

	u64			stripe_unit;
	u64			stripe_count;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* list of snapshots */
	struct list_head	snaps;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);

static void rbd_dev_release(struct device *dev);
static void rbd_snap_destroy(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_probe(struct rbd_device *rbd_dev);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);

static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};

/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

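/*
 * Take an additional reference on an existing rbd client; the caller
 * must already hold a reference (or the client list lock).  Returns
 * its argument for the caller's convenience.
 */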
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}

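/*
 * Example (editor's sketch; the exact add-string format is documented
 * in Documentation/ABI/testing/sysfs-bus-rbd): rbd-specific tokens
 * arrive via the options field of the sysfs add string, e.g. roughly
 *
 *	echo "1.2.3.4:6789 name=admin,ro rbd foo" > /sys/bus/rbd/add
 *
 * The add-parsing code hands this callback to ceph_parse_options(),
 * which invokes it for each token (here "ro") that libceph does not
 * itself recognize.
 */
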
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}

/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to remove
 * the client from the global list, so the caller must *not* hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

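/*
 * Example (editor's note, arithmetic only): the two order checks above
 * constrain the object order to the range [SECTOR_SHIFT, 31], i.e.
 * objects of 512 bytes up to 2GB; a typical image uses order 22
 * (4MB objects).
 */
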
/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX) {
			/* Don't leak the prefix copied above */
			kfree(header->object_prefix);
			header->object_prefix = NULL;
			return -EIO;
		}
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees the
		 * ondisk buffer we're working with has snap_names_len
		 * bytes beyond the end of the snapshot id array, so
		 * this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		WARN_ON(ondisk->snap_names_len);
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);
	size = sizeof (struct ceph_snap_context);
	size += snap_count * sizeof (header->snapc->snaps[0]);
	header->snapc = kzalloc(size, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;

	atomic_set(&header->snapc->nref, 1);
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	header->snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] =
			le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct rbd_snap *snap;

	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (snap_id == snap->id)
			return snap->name;

	return NULL;
}

static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!strcmp(snap_name, snap->name)) {
			rbd_dev->spec->snap_id = snap->id;
			rbd_dev->mapping.size = snap->size;
			rbd_dev->mapping.features = snap->features;

			return 0;
		}
	}

	return -ENOENT;
}

static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
	int ret;

	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->spec->snap_id = CEPH_NOSNAP;
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
		ret = 0;
	} else {
		ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
		if (ret < 0)
			goto done;
		rbd_dev->mapping.read_only = true;
	}
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);

done:
	return ret;
}

static void rbd_header_free(struct rbd_image_header *header)
{
	kfree(header->object_prefix);
	header->object_prefix = NULL;
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	ceph_put_snap_context(header->snapc);
	header->snapc = NULL;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

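/*
 * Worked example (editor's note; the prefix is hypothetical): with
 * obj_order 22 (4MB segments) and object_prefix "rb.0.1234", image
 * offset 0x500000 falls in segment 1, so the object name is
 * "rb.0.1234.000000000001" and rbd_segment_offset() is 0x100000.
 * A 4MB I/O starting there is clipped by rbd_segment_length() to
 * 0x300000, the bytes remaining in that segment.
 */
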
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}

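/*
 * Worked example (editor's note): given a source chain of two 1MB
 * bios, *offset == 0x80000 and len == 0x100000, the clone takes the
 * trailing 0x80000 bytes of the first bio plus the leading 0x80000
 * bytes of the second.  On return *bio_src points at the second bio
 * and *offset == 0x80000, the first un-cloned byte.
 */
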
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

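/*
 * Editor's note on the pattern above: each flag is set at most once,
 * and every setter/tester pairs the bit operation with smp_mb(), so a
 * reader that observes a flag as set also observes anything written
 * before it was set (e.g. EXISTS is stable once KNOWN is seen).
 */
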
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;
	}
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;
	obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}

static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
	if (!obj_request)
		return NULL;

	name = (char *)(obj_request + 1);
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request);
}

1719/*
1720 * Caller is responsible for filling in the list of object requests
1721 * that comprises the image request, and the Linux request pointer
1722 * (if there is one).
1723 */
cc344fa1
AE
1724static struct rbd_img_request *rbd_img_request_create(
1725 struct rbd_device *rbd_dev,
bf0d5f50 1726 u64 offset, u64 length,
9849e986
AE
1727 bool write_request,
1728 bool child_request)
bf0d5f50
AE
1729{
1730 struct rbd_img_request *img_request;
1731 struct ceph_snap_context *snapc = NULL;
1732
1733 img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
1734 if (!img_request)
1735 return NULL;
1736
1737 if (write_request) {
1738 down_read(&rbd_dev->header_rwsem);
1739 snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1740 up_read(&rbd_dev->header_rwsem);
1741 if (WARN_ON(!snapc)) {
1742 kfree(img_request);
1743 return NULL; /* Shouldn't happen */
1744 }
0c425248 1745
bf0d5f50
AE
1746 }
1747
1748 img_request->rq = NULL;
1749 img_request->rbd_dev = rbd_dev;
1750 img_request->offset = offset;
1751 img_request->length = length;
0c425248
AE
1752 img_request->flags = 0;
1753 if (write_request) {
1754 img_request_write_set(img_request);
bf0d5f50 1755 img_request->snapc = snapc;
0c425248 1756 } else {
bf0d5f50 1757 img_request->snap_id = rbd_dev->spec->snap_id;
0c425248 1758 }
9849e986
AE
1759 if (child_request)
1760 img_request_child_set(img_request);
d0b2e944
AE
1761 if (rbd_dev->parent_spec)
1762 img_request_layered_set(img_request);
bf0d5f50
AE
1763 spin_lock_init(&img_request->completion_lock);
1764 img_request->next_completion = 0;
1765 img_request->callback = NULL;
a5a337d4 1766 img_request->result = 0;
bf0d5f50
AE
1767 img_request->obj_request_count = 0;
1768 INIT_LIST_HEAD(&img_request->obj_requests);
1769 kref_init(&img_request->kref);
1770
1771 rbd_img_request_get(img_request); /* Avoid a warning */
1772 rbd_img_request_put(img_request); /* TEMPORARY */
1773
37206ee5
AE
1774 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1775 write_request ? "write" : "read", offset, length,
1776 img_request);
1777
bf0d5f50
AE
1778 return img_request;
1779}
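/*
 * Illustrative lifecycle of an image request (this mirrors what
 * rbd_request_fn() does further below; sketch only, not extra code):
 *
 *	img_request = rbd_img_request_create(rbd_dev, offset, length,
 *						write_request, false);
 *	img_request->rq = rq;
 *	rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, rq->bio);
 *	rbd_img_request_submit(img_request);
 *	...each object request completes via rbd_img_obj_callback()...
 *	rbd_img_request_put(img_request);	(only on a setup failure)
 */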
1780
1781static void rbd_img_request_destroy(struct kref *kref)
1782{
1783 struct rbd_img_request *img_request;
1784 struct rbd_obj_request *obj_request;
1785 struct rbd_obj_request *next_obj_request;
1786
1787 img_request = container_of(kref, struct rbd_img_request, kref);
1788
37206ee5
AE
1789 dout("%s: img %p\n", __func__, img_request);
1790
bf0d5f50
AE
1791 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1792 rbd_img_obj_request_del(img_request, obj_request);
25dcf954 1793 rbd_assert(img_request->obj_request_count == 0);
bf0d5f50 1794
0c425248 1795 if (img_request_write_test(img_request))
bf0d5f50
AE
1796 ceph_put_snap_context(img_request->snapc);
1797
8b3e1a56
AE
1798 if (img_request_child_test(img_request))
1799 rbd_obj_request_put(img_request->obj_request);
1800
bf0d5f50
AE
1801 kfree(img_request);
1802}
1803
1217857f
AE
1804static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
1805{
6365d33a 1806 struct rbd_img_request *img_request;
1217857f
AE
1807 unsigned int xferred;
1808 int result;
8b3e1a56 1809 bool more;
1217857f 1810
6365d33a
AE
1811 rbd_assert(obj_request_img_data_test(obj_request));
1812 img_request = obj_request->img_request;
1813
1217857f
AE
1814 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
1815 xferred = (unsigned int)obj_request->xferred;
1816 result = obj_request->result;
1817 if (result) {
1818 struct rbd_device *rbd_dev = img_request->rbd_dev;
1819
1820 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
1821 img_request_write_test(img_request) ? "write" : "read",
1822 obj_request->length, obj_request->img_offset,
1823 obj_request->offset);
1824 rbd_warn(rbd_dev, " result %d xferred %x\n",
1825 result, xferred);
1826 if (!img_request->result)
1827 img_request->result = result;
1828 }
1829
f1a4739f
AE
1830 /* Image object requests don't own their page array */
1831
1832 if (obj_request->type == OBJ_REQUEST_PAGES) {
1833 obj_request->pages = NULL;
1834 obj_request->page_count = 0;
1835 }
1836
8b3e1a56
AE
1837 if (img_request_child_test(img_request)) {
1838 rbd_assert(img_request->obj_request != NULL);
1839 more = obj_request->which < img_request->obj_request_count - 1;
1840 } else {
1841 rbd_assert(img_request->rq != NULL);
1842 more = blk_end_request(img_request->rq, result, xferred);
1843 }
1844
1845 return more;
1217857f
AE
1846}
1847
2169238d
AE
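/*
 * Per-object completion callback for image requests.  Object
 * requests can complete in any order, but the block layer must be
 * told about completions in offset order.  A request whose index
 * ("which") is ahead of next_completion just records its done state;
 * completions are rolled forward from next_completion once the
 * request at that index has actually finished.
 */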
1848static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
1849{
1850 struct rbd_img_request *img_request;
1851 u32 which = obj_request->which;
1852 bool more = true;
1853
6365d33a 1854 rbd_assert(obj_request_img_data_test(obj_request));
2169238d
AE
1855 img_request = obj_request->img_request;
1856
1857 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1858 rbd_assert(img_request != NULL);
2169238d
AE
1859 rbd_assert(img_request->obj_request_count > 0);
1860 rbd_assert(which != BAD_WHICH);
1861 rbd_assert(which < img_request->obj_request_count);
1862 rbd_assert(which >= img_request->next_completion);
1863
1864 spin_lock_irq(&img_request->completion_lock);
1865 if (which != img_request->next_completion)
1866 goto out;
1867
1868 for_each_obj_request_from(img_request, obj_request) {
2169238d
AE
1869 rbd_assert(more);
1870 rbd_assert(which < img_request->obj_request_count);
1871
1872 if (!obj_request_done_test(obj_request))
1873 break;
1217857f 1874 more = rbd_img_obj_end_request(obj_request);
2169238d
AE
1875 which++;
1876 }
1877
1878 rbd_assert(more ^ (which == img_request->obj_request_count));
1879 img_request->next_completion = which;
1880out:
1881 spin_unlock_irq(&img_request->completion_lock);
1882
1883 if (!more)
1884 rbd_img_request_complete(img_request);
1885}
1886
f1a4739f
AE
1887/*
1888 * Split up an image request into one or more object requests, each
1889 * to a different object. The "type" parameter indicates whether
1890 * "data_desc" is the pointer to the head of a list of bio
1891 * structures, or the base of a page array. In either case this
1892 * function assumes data_desc describes memory sufficient to hold
1893 * all data described by the image request.
1894 */
1895static int rbd_img_request_fill(struct rbd_img_request *img_request,
1896 enum obj_request_type type,
1897 void *data_desc)
bf0d5f50
AE
1898{
1899 struct rbd_device *rbd_dev = img_request->rbd_dev;
1900 struct rbd_obj_request *obj_request = NULL;
1901 struct rbd_obj_request *next_obj_request;
0c425248 1902 bool write_request = img_request_write_test(img_request);
f1a4739f
AE
1903 struct bio *bio_list;
1904 unsigned int bio_offset = 0;
1905 struct page **pages;
7da22d29 1906 u64 img_offset;
bf0d5f50
AE
1907 u64 resid;
1908 u16 opcode;
1909
f1a4739f
AE
1910 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
1911 (int)type, data_desc);
37206ee5 1912
430c28c3 1913 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
7da22d29 1914 img_offset = img_request->offset;
bf0d5f50 1915 resid = img_request->length;
4dda41d3 1916 rbd_assert(resid > 0);
f1a4739f
AE
1917
1918 if (type == OBJ_REQUEST_BIO) {
1919 bio_list = data_desc;
1920 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
1921 } else {
1922 rbd_assert(type == OBJ_REQUEST_PAGES);
1923 pages = data_desc;
1924 }
1925
bf0d5f50 1926 while (resid) {
2fa12320 1927 struct ceph_osd_request *osd_req;
bf0d5f50 1928 const char *object_name;
bf0d5f50
AE
1929 u64 offset;
1930 u64 length;
1931
7da22d29 1932 object_name = rbd_segment_name(rbd_dev, img_offset);
bf0d5f50
AE
1933 if (!object_name)
1934 goto out_unwind;
7da22d29
AE
1935 offset = rbd_segment_offset(rbd_dev, img_offset);
1936 length = rbd_segment_length(rbd_dev, img_offset, resid);
bf0d5f50 1937 obj_request = rbd_obj_request_create(object_name,
f1a4739f 1938 offset, length, type);
bf0d5f50
AE
1939 kfree(object_name); /* object request has its own copy */
1940 if (!obj_request)
1941 goto out_unwind;
1942
f1a4739f
AE
1943 if (type == OBJ_REQUEST_BIO) {
1944 unsigned int clone_size;
1945
1946 rbd_assert(length <= (u64)UINT_MAX);
1947 clone_size = (unsigned int)length;
1948 obj_request->bio_list =
1949 bio_chain_clone_range(&bio_list,
1950 &bio_offset,
1951 clone_size,
1952 GFP_ATOMIC);
1953 if (!obj_request->bio_list)
1954 goto out_partial;
1955 } else {
1956 unsigned int page_count;
1957
1958 obj_request->pages = pages;
1959 page_count = (u32)calc_pages_for(offset, length);
1960 obj_request->page_count = page_count;
1961 if ((offset + length) & ~PAGE_MASK)
1962 page_count--; /* more on last page */
1963 pages += page_count;
1964 }
bf0d5f50 1965
2fa12320
AE
1966 osd_req = rbd_osd_req_create(rbd_dev, write_request,
1967 obj_request);
1968 if (!osd_req)
bf0d5f50 1969 goto out_partial;
2fa12320 1970 obj_request->osd_req = osd_req;
2169238d 1971 obj_request->callback = rbd_img_obj_callback;
430c28c3 1972
2fa12320
AE
1973 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
1974 0, 0);
f1a4739f
AE
1975 if (type == OBJ_REQUEST_BIO)
1976 osd_req_op_extent_osd_data_bio(osd_req, 0,
1977 obj_request->bio_list, length);
1978 else
1979 osd_req_op_extent_osd_data_pages(osd_req, 0,
1980 obj_request->pages, length,
1981 offset & ~PAGE_MASK, false, false);
9d4df01f
AE
1982
1983 if (write_request)
1984 rbd_osd_req_format_write(obj_request);
1985 else
1986 rbd_osd_req_format_read(obj_request);
430c28c3 1987
7da22d29 1988 obj_request->img_offset = img_offset;
bf0d5f50
AE
1989 rbd_img_obj_request_add(img_request, obj_request);
1990
7da22d29 1991 img_offset += length;
bf0d5f50
AE
1992 resid -= length;
1993 }
1994
1995 return 0;
1996
1997out_partial:
1998 rbd_obj_request_put(obj_request);
1999out_unwind:
2000 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2001 rbd_obj_request_put(obj_request);
2002
2003 return -ENOMEM;
2004}
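/*
 * Worked example of the split performed by rbd_img_request_fill(),
 * assuming the default 4MB (obj_order == 22) objects: a 1MB write at
 * image offset 0x3f8000 becomes two object requests, one covering
 * the final 0x8000 bytes of object 0 and one covering the first
 * 0xf8000 bytes of object 1.
 */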
2005
0eefd470
AE
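/*
 * Completion callback for the two-op copyup request built in
 * rbd_img_obj_parent_read_full_callback() below: release the page
 * vector that held the parent data, then finish up via the normal
 * image object callback.
 */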
2006static void
2007rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2008{
2009 struct rbd_img_request *img_request;
2010 struct rbd_device *rbd_dev;
2011 u64 length;
2012 u32 page_count;
2013
2014 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2015 rbd_assert(obj_request_img_data_test(obj_request));
2016 img_request = obj_request->img_request;
2017 rbd_assert(img_request);
2018
2019 rbd_dev = img_request->rbd_dev;
2020 rbd_assert(rbd_dev);
2021 length = (u64)1 << rbd_dev->header.obj_order;
2022 page_count = (u32)calc_pages_for(0, length);
2023
2024 rbd_assert(obj_request->copyup_pages);
2025 ceph_release_page_vector(obj_request->copyup_pages, page_count);
2026 obj_request->copyup_pages = NULL;
2027
2028 /*
2029 * We want the transfer count to reflect the size of the
2030 * original write request. There is no such thing as a
2031 * successful short write, so if the request was successful
2032 * we can just set it to the originally-requested length.
2033 */
2034 if (!obj_request->result)
2035 obj_request->xferred = obj_request->length;
2036
2037 /* Finish up with the normal image object callback */
2038
2039 rbd_img_obj_callback(obj_request);
2040}
2041
3d7efd18
AE
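/*
 * Called when the parent read issued by rbd_img_obj_parent_read_full()
 * completes.  On success, build a new osd request for the original
 * target object carrying two ops--a "copyup" class method call with
 * the parent data, followed by the original write--and submit it.
 */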
2042static void
2043rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2044{
2045 struct rbd_obj_request *orig_request;
0eefd470
AE
2046 struct ceph_osd_request *osd_req;
2047 struct ceph_osd_client *osdc;
2048 struct rbd_device *rbd_dev;
3d7efd18 2049 struct page **pages;
3d7efd18
AE
2050 int result;
2051 u64 obj_size;
2052 u64 xferred;
2053
2054 rbd_assert(img_request_child_test(img_request));
2055
2056 /* First get what we need from the image request */
2057
2058 pages = img_request->copyup_pages;
2059 rbd_assert(pages != NULL);
2060 img_request->copyup_pages = NULL;
2061
2062 orig_request = img_request->obj_request;
2063 rbd_assert(orig_request != NULL);
0eefd470 2064 rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
3d7efd18
AE
2065 result = img_request->result;
2066 obj_size = img_request->length;
2067 xferred = img_request->xferred;
2068
0eefd470
AE
2069 rbd_dev = img_request->rbd_dev;
2070 rbd_assert(rbd_dev);
2071 rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
2072
3d7efd18
AE
2073 rbd_img_request_put(img_request);
2074
0eefd470
AE
2075 if (result)
2076 goto out_err;
2077
2078 /* Allocate the new copyup osd request for the original request */
2079
2080 result = -ENOMEM;
2081 rbd_assert(!orig_request->osd_req);
2082 osd_req = rbd_osd_req_create_copyup(orig_request);
2083 if (!osd_req)
2084 goto out_err;
2085 orig_request->osd_req = osd_req;
2086 orig_request->copyup_pages = pages;
3d7efd18 2087
0eefd470 2088 /* Initialize the copyup op */
3d7efd18 2089
0eefd470
AE
2090 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2091 osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
2092 false, false);
3d7efd18 2093
0eefd470
AE
2094 /* Then the original write request op */
2095
2096 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2097 orig_request->offset,
2098 orig_request->length, 0, 0);
2099 osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
2100 orig_request->length);
2101
2102 rbd_osd_req_format_write(orig_request);
2103
2104 /* All set, send it off. */
2105
2106 orig_request->callback = rbd_img_obj_copyup_callback;
2107 osdc = &rbd_dev->rbd_client->client->osdc;
2108 result = rbd_obj_request_submit(osdc, orig_request);
2109 if (!result)
2110 return;
2111out_err:
2112 /* Record the error code and complete the request */
2113
2114 orig_request->result = result;
2115 orig_request->xferred = 0;
2116 obj_request_done_set(orig_request);
2117 rbd_obj_request_complete(orig_request);
3d7efd18
AE
2118}
2119
2120/*
2121 * Read from the parent image the range of data that covers the
2122 * entire target of the given object request. This is used for
2123 * satisfying a layered image write request when the target of an
2124 * object request from the image request does not exist.
2125 *
2126 * A page array big enough to hold the returned data is allocated
2127 * and supplied to rbd_img_request_fill() as the "data descriptor."
2128 * When the read completes, this page array will be transferred to
2129 * the original object request for the copyup operation.
2130 *
2131 * If an error occurs, record it as the result of the original
2132 * object request and mark it done so it gets completed.
2133 */
2134static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2135{
2136 struct rbd_img_request *img_request = NULL;
2137 struct rbd_img_request *parent_request = NULL;
2138 struct rbd_device *rbd_dev;
2139 u64 img_offset;
2140 u64 length;
2141 struct page **pages = NULL;
2142 u32 page_count;
2143 int result;
2144
2145 rbd_assert(obj_request_img_data_test(obj_request));
2146 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2147
2148 img_request = obj_request->img_request;
2149 rbd_assert(img_request != NULL);
2150 rbd_dev = img_request->rbd_dev;
2151 rbd_assert(rbd_dev->parent != NULL);
2152
0eefd470
AE
2153 /*
2154 * First things first. The original osd request is of no
2155 * use to us any more; we'll need a new one that can hold
2156 * the two ops in a copyup request. We'll get that later,
2157 * but for now we can release the old one.
2158 */
2159 rbd_osd_req_destroy(obj_request->osd_req);
2160 obj_request->osd_req = NULL;
2161
3d7efd18
AE
2162 /*
2163 * Determine the byte range covered by the object in the
2164 * child image to which the original request was to be sent.
2165 */
2166 img_offset = obj_request->img_offset - obj_request->offset;
2167 length = (u64)1 << rbd_dev->header.obj_order;
2168
a9e8ba2c
AE
2169 /*
2170 * There is no defined parent data beyond the parent
2171 * overlap, so limit what we read at that boundary if
2172 * necessary.
2173 */
2174 if (img_offset + length > rbd_dev->parent_overlap) {
2175 rbd_assert(img_offset < rbd_dev->parent_overlap);
2176 length = rbd_dev->parent_overlap - img_offset;
2177 }
2178
3d7efd18
AE
2179 /*
2180 * Allocate a page array big enough to receive the data read
2181 * from the parent.
2182 */
2183 page_count = (u32)calc_pages_for(0, length);
2184 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2185 if (IS_ERR(pages)) {
2186 result = PTR_ERR(pages);
2187 pages = NULL;
2188 goto out_err;
2189 }
2190
2191 result = -ENOMEM;
2192 parent_request = rbd_img_request_create(rbd_dev->parent,
2193 img_offset, length,
2194 false, true);
2195 if (!parent_request)
2196 goto out_err;
2197 rbd_obj_request_get(obj_request);
2198 parent_request->obj_request = obj_request;
2199
2200 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2201 if (result)
2202 goto out_err;
2203 parent_request->copyup_pages = pages;
2204
2205 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2206 result = rbd_img_request_submit(parent_request);
2207 if (!result)
2208 return 0;
2209
2210 parent_request->copyup_pages = NULL;
2211 parent_request->obj_request = NULL;
2212 rbd_obj_request_put(obj_request);
2213out_err:
2214 if (pages)
2215 ceph_release_page_vector(pages, page_count);
2216 if (parent_request)
2217 rbd_img_request_put(parent_request);
2218 obj_request->result = result;
2219 obj_request->xferred = 0;
2220 obj_request_done_set(obj_request);
2221
2222 return result;
2223}
2224
c5b5ef6c
AE
2225static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2226{
c5b5ef6c
AE
2227 struct rbd_obj_request *orig_request;
2228 int result;
2229
2230 rbd_assert(!obj_request_img_data_test(obj_request));
2231
2232 /*
2233 * All we need from the object request is the original
2234 * request and the result of the STAT op. Grab those, then
2235 * we're done with the request.
2236 */
2237 orig_request = obj_request->obj_request;
2238 obj_request->obj_request = NULL;
2239 rbd_assert(orig_request);
2240 rbd_assert(orig_request->img_request);
2241
2242 result = obj_request->result;
2243 obj_request->result = 0;
2244
2245 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2246 obj_request, orig_request, result,
2247 obj_request->xferred, obj_request->length);
2248 rbd_obj_request_put(obj_request);
2249
2250 rbd_assert(orig_request);
2251 rbd_assert(orig_request->img_request);
c5b5ef6c
AE
2252
2253 /*
2254 * Our only purpose here is to determine whether the object
2255 * exists, and we don't want to treat the non-existence as
2256 * an error. If something else comes back, transfer the
2257 * error to the original request and complete it now.
2258 */
2259 if (!result) {
2260 obj_request_existence_set(orig_request, true);
2261 } else if (result == -ENOENT) {
2262 obj_request_existence_set(orig_request, false);
2263 } else if (result) {
2264 orig_request->result = result;
3d7efd18 2265 goto out;
c5b5ef6c
AE
2266 }
2267
2268 /*
2269 * Resubmit the original request now that we have recorded
2270 * whether the target object exists.
2271 */
b454e36d 2272 orig_request->result = rbd_img_obj_request_submit(orig_request);
3d7efd18 2273out:
c5b5ef6c
AE
2274 if (orig_request->result)
2275 rbd_obj_request_complete(orig_request);
2276 rbd_obj_request_put(orig_request);
2277}
2278
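/*
 * Issue a STAT against the target object of a layered write to learn
 * whether the object already exists, and therefore whether parent
 * data must first be copied up into it.
 */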
2279static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2280{
2281 struct rbd_obj_request *stat_request;
2282 struct rbd_device *rbd_dev;
2283 struct ceph_osd_client *osdc;
2284 struct page **pages = NULL;
2285 u32 page_count;
2286 size_t size;
2287 int ret;
2288
2289 /*
2290 * The response data for a STAT call consists of:
2291 * le64 length;
2292 * struct {
2293 * le32 tv_sec;
2294 * le32 tv_nsec;
2295 * } mtime;
2296 */
2297 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2298 page_count = (u32)calc_pages_for(0, size);
2299 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2300 if (IS_ERR(pages))
2301 return PTR_ERR(pages);
2302
2303 ret = -ENOMEM;
2304 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2305 OBJ_REQUEST_PAGES);
2306 if (!stat_request)
2307 goto out;
2308
2309 rbd_obj_request_get(obj_request);
2310 stat_request->obj_request = obj_request;
2311 stat_request->pages = pages;
2312 stat_request->page_count = page_count;
2313
2314 rbd_assert(obj_request->img_request);
2315 rbd_dev = obj_request->img_request->rbd_dev;
2316 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2317 stat_request);
2318 if (!stat_request->osd_req)
2319 goto out;
2320 stat_request->callback = rbd_img_obj_exists_callback;
2321
2322 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2323 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2324 false, false);
9d4df01f 2325 rbd_osd_req_format_read(stat_request);
c5b5ef6c
AE
2326
2327 osdc = &rbd_dev->rbd_client->client->osdc;
2328 ret = rbd_obj_request_submit(osdc, stat_request);
2329out:
2330 if (ret)
2331 rbd_obj_request_put(obj_request);
2332
2333 return ret;
2334}
2335
b454e36d
AE
2336static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2337{
2338 struct rbd_img_request *img_request;
a9e8ba2c 2339 struct rbd_device *rbd_dev;
3d7efd18 2340 bool known;
b454e36d
AE
2341
2342 rbd_assert(obj_request_img_data_test(obj_request));
2343
2344 img_request = obj_request->img_request;
2345 rbd_assert(img_request);
a9e8ba2c 2346 rbd_dev = img_request->rbd_dev;
b454e36d 2347
b454e36d 2348 /*
a9e8ba2c
AE
2349 * Only writes to layered images need special handling.
2350 * Reads and non-layered writes are simple object requests.
2351 * Layered writes that start beyond the end of the overlap
2352 * with the parent have no parent data, so they too are
2353 * simple object requests. Finally, if the target object is
2354 * known to already exist, its parent data has already been
2355 * copied, so a write to the object can also be handled as a
2356 * simple object request.
b454e36d
AE
2357 */
2358 if (!img_request_write_test(img_request) ||
2359 !img_request_layered_test(img_request) ||
a9e8ba2c 2360 rbd_dev->parent_overlap <= obj_request->img_offset ||
3d7efd18
AE
2361 ((known = obj_request_known_test(obj_request)) &&
2362 obj_request_exists_test(obj_request))) {
b454e36d
AE
2363
2364 struct rbd_device *rbd_dev;
2365 struct ceph_osd_client *osdc;
2366
2367 rbd_dev = obj_request->img_request->rbd_dev;
2368 osdc = &rbd_dev->rbd_client->client->osdc;
2369
2370 return rbd_obj_request_submit(osdc, obj_request);
2371 }
2372
2373 /*
3d7efd18
AE
2374 * It's a layered write. The target object might exist but
2375 * we may not know that yet. If we know it doesn't exist,
2376 * start by reading the data for the full target object from
2377 * the parent so we can use it for a copyup to the target.
b454e36d 2378 */
3d7efd18
AE
2379 if (known)
2380 return rbd_img_obj_parent_read_full(obj_request);
2381
2382 /* We don't know whether the target exists. Go find out. */
b454e36d
AE
2383
2384 return rbd_img_obj_exists_submit(obj_request);
2385}
2386
bf0d5f50
AE
2387static int rbd_img_request_submit(struct rbd_img_request *img_request)
2388{
bf0d5f50 2389 struct rbd_obj_request *obj_request;
46faeed4 2390 struct rbd_obj_request *next_obj_request;
bf0d5f50 2391
37206ee5 2392 dout("%s: img %p\n", __func__, img_request);
46faeed4 2393 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
bf0d5f50
AE
2394 int ret;
2395
b454e36d 2396 ret = rbd_img_obj_request_submit(obj_request);
bf0d5f50
AE
2397 if (ret)
2398 return ret;
bf0d5f50
AE
2399 }
2400
2401 return 0;
2402}
8b3e1a56
AE
2403
2404static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2405{
2406 struct rbd_obj_request *obj_request;
a9e8ba2c
AE
2407 struct rbd_device *rbd_dev;
2408 u64 obj_end;
8b3e1a56
AE
2409
2410 rbd_assert(img_request_child_test(img_request));
2411
2412 obj_request = img_request->obj_request;
a9e8ba2c
AE
2413 rbd_assert(obj_request);
2414 rbd_assert(obj_request->img_request);
2415
8b3e1a56 2416 obj_request->result = img_request->result;
a9e8ba2c
AE
2417 if (obj_request->result)
2418 goto out;
2419
2420 /*
2421 * We need to zero anything beyond the parent overlap
2422 * boundary. Since rbd_img_obj_request_read_callback()
2423 * will zero anything beyond the end of a short read, an
2424 * easy way to do this is to pretend the data from the
2425 * parent came up short--ending at the overlap boundary.
2426 */
2427 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2428 obj_end = obj_request->img_offset + obj_request->length;
2429 rbd_dev = obj_request->img_request->rbd_dev;
2430 if (obj_end > rbd_dev->parent_overlap) {
2431 u64 xferred = 0;
2432
2433 if (obj_request->img_offset < rbd_dev->parent_overlap)
2434 xferred = rbd_dev->parent_overlap -
2435 obj_request->img_offset;
8b3e1a56 2436
a9e8ba2c
AE
2437 obj_request->xferred = min(img_request->xferred, xferred);
2438 } else {
2439 obj_request->xferred = img_request->xferred;
2440 }
2441out:
8b3e1a56
AE
2442 rbd_img_obj_request_read_callback(obj_request);
2443 rbd_obj_request_complete(obj_request);
2444}
2445
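/*
 * Satisfy an object read that returned -ENOENT on a layered image by
 * reading the same byte range from the parent image instead.
 */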
2446static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2447{
2448 struct rbd_device *rbd_dev;
2449 struct rbd_img_request *img_request;
2450 int result;
2451
2452 rbd_assert(obj_request_img_data_test(obj_request));
2453 rbd_assert(obj_request->img_request != NULL);
2454 rbd_assert(obj_request->result == (s32) -ENOENT);
2455 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2456
2457 rbd_dev = obj_request->img_request->rbd_dev;
2458 rbd_assert(rbd_dev->parent != NULL);
2459 /* rbd_read_finish(obj_request, obj_request->length); */
2460 img_request = rbd_img_request_create(rbd_dev->parent,
2461 obj_request->img_offset,
2462 obj_request->length,
2463 false, true);
2464 result = -ENOMEM;
2465 if (!img_request)
2466 goto out_err;
2467
2468 rbd_obj_request_get(obj_request);
2469 img_request->obj_request = obj_request;
2470
f1a4739f
AE
2471 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2472 obj_request->bio_list);
8b3e1a56
AE
2473 if (result)
2474 goto out_err;
2475
2476 img_request->callback = rbd_img_parent_read_callback;
2477 result = rbd_img_request_submit(img_request);
2478 if (result)
2479 goto out_err;
2480
2481 return;
2482out_err:
2483 if (img_request)
2484 rbd_img_request_put(img_request);
2485 obj_request->result = result;
2486 obj_request->xferred = 0;
2487 obj_request_done_set(obj_request);
2488}
bf0d5f50 2489
cf81b60e 2490static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
b8d70035
AE
2491 u64 ver, u64 notify_id)
2492{
2493 struct rbd_obj_request *obj_request;
2169238d 2494 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
b8d70035
AE
2495 int ret;
2496
2497 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2498 OBJ_REQUEST_NODATA);
2499 if (!obj_request)
2500 return -ENOMEM;
2501
2502 ret = -ENOMEM;
430c28c3 2503 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
b8d70035
AE
2504 if (!obj_request->osd_req)
2505 goto out;
2169238d 2506 obj_request->callback = rbd_obj_request_put;
b8d70035 2507
c99d2d4a
AE
2508 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2509 notify_id, ver, 0);
9d4df01f 2510 rbd_osd_req_format_read(obj_request);
430c28c3 2511
b8d70035 2512 ret = rbd_obj_request_submit(osdc, obj_request);
b8d70035 2513out:
cf81b60e
AE
2514 if (ret)
2515 rbd_obj_request_put(obj_request);
b8d70035
AE
2516
2517 return ret;
2518}
2519
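/*
 * Callback invoked by the osd client when the watched header object
 * changes: refresh the in-memory image header, then acknowledge the
 * notification.
 */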
2520static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2521{
2522 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2523 u64 hver;
b8d70035
AE
2524
2525 if (!rbd_dev)
2526 return;
2527
37206ee5 2528 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
b8d70035
AE
2529 rbd_dev->header_name, (unsigned long long) notify_id,
2530 (unsigned int) opcode);
522a0cc0 2531 (void)rbd_dev_refresh(rbd_dev, &hver);
b8d70035 2532
cf81b60e 2533 rbd_obj_notify_ack(rbd_dev, hver, notify_id);
b8d70035
AE
2534}
2535
9969ebc5
AE
2536/*
2537 * Request sync osd watch/unwatch. The value of "start" determines
2538 * whether a watch request is being initiated or torn down.
2539 */
2540static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2541{
2542 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2543 struct rbd_obj_request *obj_request;
9969ebc5
AE
2544 int ret;
2545
2546 rbd_assert(start ^ !!rbd_dev->watch_event);
2547 rbd_assert(start ^ !!rbd_dev->watch_request);
2548
2549 if (start) {
3c663bbd 2550 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
9969ebc5
AE
2551 &rbd_dev->watch_event);
2552 if (ret < 0)
2553 return ret;
8eb87565 2554 rbd_assert(rbd_dev->watch_event != NULL);
9969ebc5
AE
2555 }
2556
2557 ret = -ENOMEM;
2558 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2559 OBJ_REQUEST_NODATA);
2560 if (!obj_request)
2561 goto out_cancel;
2562
430c28c3
AE
2563 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2564 if (!obj_request->osd_req)
2565 goto out_cancel;
2566
8eb87565 2567 if (start)
975241af 2568 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
8eb87565 2569 else
6977c3f9 2570 ceph_osdc_unregister_linger_request(osdc,
975241af 2571 rbd_dev->watch_request->osd_req);
2169238d
AE
2572
2573 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2574 rbd_dev->watch_event->cookie,
2575 rbd_dev->header.obj_version, start);
9d4df01f 2576 rbd_osd_req_format_write(obj_request);
2169238d 2577
9969ebc5
AE
2578 ret = rbd_obj_request_submit(osdc, obj_request);
2579 if (ret)
2580 goto out_cancel;
2581 ret = rbd_obj_request_wait(obj_request);
2582 if (ret)
2583 goto out_cancel;
9969ebc5
AE
2584 ret = obj_request->result;
2585 if (ret)
2586 goto out_cancel;
2587
8eb87565
AE
2588 /*
2589 * A watch request is set to linger, so the underlying osd
2590 * request won't go away until we unregister it. We retain
2591 * a pointer to the object request during that time (in
2592 * rbd_dev->watch_request), so we'll keep a reference to
2593 * it. We'll drop that reference (below) after we've
2594 * unregistered it.
2595 */
2596 if (start) {
2597 rbd_dev->watch_request = obj_request;
2598
2599 return 0;
2600 }
2601
2602 /* We have successfully torn down the watch request */
2603
2604 rbd_obj_request_put(rbd_dev->watch_request);
2605 rbd_dev->watch_request = NULL;
9969ebc5
AE
2606out_cancel:
2607 /* Cancel the event if we're tearing down, or on error */
2608 ceph_osdc_cancel_event(rbd_dev->watch_event);
2609 rbd_dev->watch_event = NULL;
9969ebc5
AE
2610 if (obj_request)
2611 rbd_obj_request_put(obj_request);
2612
2613 return ret;
2614}
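/*
 * Typical usage (illustrative): rbd_dev_header_watch_sync(rbd_dev, 1)
 * registers the watch when an image is mapped, and
 * rbd_dev_header_watch_sync(rbd_dev, 0) tears it down again when the
 * image is unmapped.
 */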
2615
36be9a76 2616/*
f40eb349
AE
2617 * Synchronous osd object method call. Returns the number of bytes
2618 * returned in the inbound buffer, or a negative error code.
36be9a76
AE
2619 */
2620static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2621 const char *object_name,
2622 const char *class_name,
2623 const char *method_name,
4157976b 2624 const void *outbound,
36be9a76 2625 size_t outbound_size,
4157976b 2626 void *inbound,
36be9a76
AE
2627 size_t inbound_size,
2628 u64 *version)
2629{
2169238d 2630 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
36be9a76 2631 struct rbd_obj_request *obj_request;
36be9a76
AE
2632 struct page **pages;
2633 u32 page_count;
2634 int ret;
2635
2636 /*
6010a451
AE
2637 * Method calls are ultimately read operations. The result
2638 * should be placed into the inbound buffer provided. They
2639 * also supply outbound data--parameters for the object
2640 * method. Currently if this is present it will be a
2641 * snapshot id.
36be9a76 2642 */
57385b51 2643 page_count = (u32)calc_pages_for(0, inbound_size);
36be9a76
AE
2644 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2645 if (IS_ERR(pages))
2646 return PTR_ERR(pages);
2647
2648 ret = -ENOMEM;
6010a451 2649 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
36be9a76
AE
2650 OBJ_REQUEST_PAGES);
2651 if (!obj_request)
2652 goto out;
2653
2654 obj_request->pages = pages;
2655 obj_request->page_count = page_count;
2656
430c28c3 2657 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
36be9a76
AE
2658 if (!obj_request->osd_req)
2659 goto out;
2660
c99d2d4a 2661 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
04017e29
AE
2662 class_name, method_name);
2663 if (outbound_size) {
2664 struct ceph_pagelist *pagelist;
2665
2666 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2667 if (!pagelist)
2668 goto out;
2669
2670 ceph_pagelist_init(pagelist);
2671 ceph_pagelist_append(pagelist, outbound, outbound_size);
2672 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2673 pagelist);
2674 }
a4ce40a9
AE
2675 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2676 obj_request->pages, inbound_size,
44cd188d 2677 0, false, false);
9d4df01f 2678 rbd_osd_req_format_read(obj_request);
430c28c3 2679
36be9a76
AE
2680 ret = rbd_obj_request_submit(osdc, obj_request);
2681 if (ret)
2682 goto out;
2683 ret = rbd_obj_request_wait(obj_request);
2684 if (ret)
2685 goto out;
2686
2687 ret = obj_request->result;
2688 if (ret < 0)
2689 goto out;
57385b51
AE
2690
2691 rbd_assert(obj_request->xferred < (u64)INT_MAX);
2692 ret = (int)obj_request->xferred;
903bb32e 2693 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
36be9a76
AE
2694 if (version)
2695 *version = obj_request->version;
2696out:
2697 if (obj_request)
2698 rbd_obj_request_put(obj_request);
2699 else
2700 ceph_release_page_vector(pages, page_count);
2701
2702 return ret;
2703}
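/*
 * Example call (see _rbd_dev_v2_snap_size() below): "get_size" is
 * invoked with the snapshot id as its outbound data and returns a
 * packed { u8 order; __le64 size; } structure in the inbound buffer.
 */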
2704
bf0d5f50 2705static void rbd_request_fn(struct request_queue *q)
cc344fa1 2706 __releases(q->queue_lock) __acquires(q->queue_lock)
bf0d5f50
AE
2707{
2708 struct rbd_device *rbd_dev = q->queuedata;
2709 bool read_only = rbd_dev->mapping.read_only;
2710 struct request *rq;
2711 int result;
2712
2713 while ((rq = blk_fetch_request(q))) {
2714 bool write_request = rq_data_dir(rq) == WRITE;
2715 struct rbd_img_request *img_request;
2716 u64 offset;
2717 u64 length;
2718
2719 /* Ignore any non-FS requests that filter through. */
2720
2721 if (rq->cmd_type != REQ_TYPE_FS) {
4dda41d3
AE
2722 dout("%s: non-fs request type %d\n", __func__,
2723 (int) rq->cmd_type);
2724 __blk_end_request_all(rq, 0);
2725 continue;
2726 }
2727
2728 /* Ignore/skip any zero-length requests */
2729
2730 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2731 length = (u64) blk_rq_bytes(rq);
2732
2733 if (!length) {
2734 dout("%s: zero-length request\n", __func__);
bf0d5f50
AE
2735 __blk_end_request_all(rq, 0);
2736 continue;
2737 }
2738
2739 spin_unlock_irq(q->queue_lock);
2740
2741 /* Disallow writes to a read-only device */
2742
2743 if (write_request) {
2744 result = -EROFS;
2745 if (read_only)
2746 goto end_request;
2747 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
2748 }
2749
6d292906
AE
2750 /*
2751 * Quit early if the mapped snapshot no longer
2752 * exists. It's still possible the snapshot will
2753 * have disappeared by the time our request arrives
2754 * at the osd, but there's no sense in sending it if
2755 * we already know.
2756 */
2757 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
bf0d5f50
AE
2758 dout("request for non-existent snapshot");
2759 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2760 result = -ENXIO;
2761 goto end_request;
2762 }
2763
bf0d5f50
AE
2764 result = -EINVAL;
2765 if (WARN_ON(offset && length > U64_MAX - offset + 1))
2766 goto end_request; /* Shouldn't happen */
2767
2768 result = -ENOMEM;
2769 img_request = rbd_img_request_create(rbd_dev, offset, length,
9849e986 2770 write_request, false);
bf0d5f50
AE
2771 if (!img_request)
2772 goto end_request;
2773
2774 img_request->rq = rq;
2775
f1a4739f
AE
2776 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2777 rq->bio);
bf0d5f50
AE
2778 if (!result)
2779 result = rbd_img_request_submit(img_request);
2780 if (result)
2781 rbd_img_request_put(img_request);
2782end_request:
2783 spin_lock_irq(q->queue_lock);
2784 if (result < 0) {
7da22d29
AE
2785 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
2786 write_request ? "write" : "read",
2787 length, offset, result);
2788
bf0d5f50
AE
2789 __blk_end_request_all(rq, result);
2790 }
2791 }
2792}
2793
602adf40
YS
2794/*
2795 * a queue callback. Makes sure that we don't create a bio that spans across
2796 * multiple osd objects. One exception would be single-page bios,
f7760dad 2797 * which we handle later in bio_chain_clone_range()
602adf40
YS
2798 */
2799static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2800 struct bio_vec *bvec)
2801{
2802 struct rbd_device *rbd_dev = q->queuedata;
e5cfeed2
AE
2803 sector_t sector_offset;
2804 sector_t sectors_per_obj;
2805 sector_t obj_sector_offset;
2806 int ret;
2807
2808 /*
2809 * Find how far into its rbd object the partition-relative
2810 * bio start sector is to offset relative to the enclosing
2811 * device.
2812 */
2813 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2814 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2815 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2816
2817 /*
2818 * Compute the number of bytes from that offset to the end
2819 * of the object. Account for what's already used by the bio.
2820 */
2821 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2822 if (ret > bmd->bi_size)
2823 ret -= bmd->bi_size;
2824 else
2825 ret = 0;
2826
2827 /*
2828 * Don't send back more than was asked for. And if the bio
2829 * was empty, let the whole thing through because: "Note
2830 * that a block device *must* allow a single page to be
2831 * added to an empty bio."
2832 */
2833 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2834 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2835 ret = (int) bvec->bv_len;
2836
2837 return ret;
602adf40
YS
2838}
2839
2840static void rbd_free_disk(struct rbd_device *rbd_dev)
2841{
2842 struct gendisk *disk = rbd_dev->disk;
2843
2844 if (!disk)
2845 return;
2846
a0cab924
AE
2847 rbd_dev->disk = NULL;
2848 if (disk->flags & GENHD_FL_UP) {
602adf40 2849 del_gendisk(disk);
a0cab924
AE
2850 if (disk->queue)
2851 blk_cleanup_queue(disk->queue);
2852 }
602adf40
YS
2853 put_disk(disk);
2854}
2855
788e2df3
AE
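/*
 * Synchronously read "length" bytes starting at "offset" from the
 * named object into "buf".  Returns the number of bytes read on
 * success, or a negative errno.  If "version" is non-null it is
 * filled with the object version at the time of the read.
 */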
2856static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2857 const char *object_name,
2858 u64 offset, u64 length,
80ef15bf 2859 void *buf, u64 *version)
788e2df3
AE
2860
2861{
2169238d 2862 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
788e2df3 2863 struct rbd_obj_request *obj_request;
788e2df3
AE
2864 struct page **pages = NULL;
2865 u32 page_count;
1ceae7ef 2866 size_t size;
788e2df3
AE
2867 int ret;
2868
2869 page_count = (u32) calc_pages_for(offset, length);
2870 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2871 if (IS_ERR(pages))
2872 return PTR_ERR(pages);
2873
2874 ret = -ENOMEM;
2875 obj_request = rbd_obj_request_create(object_name, offset, length,
36be9a76 2876 OBJ_REQUEST_PAGES);
788e2df3
AE
2877 if (!obj_request)
2878 goto out;
2879
2880 obj_request->pages = pages;
2881 obj_request->page_count = page_count;
2882
430c28c3 2883 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
788e2df3
AE
2884 if (!obj_request->osd_req)
2885 goto out;
2886
c99d2d4a
AE
2887 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
2888 offset, length, 0, 0);
406e2c9f 2889 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
a4ce40a9 2890 obj_request->pages,
44cd188d
AE
2891 obj_request->length,
2892 obj_request->offset & ~PAGE_MASK,
2893 false, false);
9d4df01f 2894 rbd_osd_req_format_read(obj_request);
430c28c3 2895
788e2df3
AE
2896 ret = rbd_obj_request_submit(osdc, obj_request);
2897 if (ret)
2898 goto out;
2899 ret = rbd_obj_request_wait(obj_request);
2900 if (ret)
2901 goto out;
2902
2903 ret = obj_request->result;
2904 if (ret < 0)
2905 goto out;
1ceae7ef
AE
2906
2907 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
2908 size = (size_t) obj_request->xferred;
903bb32e 2909 ceph_copy_from_page_vector(pages, buf, 0, size);
23ed6e13
AE
2910 rbd_assert(size <= (size_t) INT_MAX);
2911 ret = (int) size;
788e2df3
AE
2912 if (version)
2913 *version = obj_request->version;
2914out:
2915 if (obj_request)
2916 rbd_obj_request_put(obj_request);
2917 else
2918 ceph_release_page_vector(pages, page_count);
2919
2920 return ret;
2921}
2922
602adf40 2923/*
4156d998
AE
2924 * Read the complete header for the given rbd device.
2925 *
2926 * Returns a pointer to a dynamically-allocated buffer containing
2927 * the complete and validated header. Caller can pass the address
2928 * of a variable that will be filled in with the version of the
2929 * header object at the time it was read.
2930 *
2931 * Returns a pointer-coded errno if a failure occurs.
602adf40 2932 */
4156d998
AE
2933static struct rbd_image_header_ondisk *
2934rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
602adf40 2935{
4156d998 2936 struct rbd_image_header_ondisk *ondisk = NULL;
50f7c4c9 2937 u32 snap_count = 0;
4156d998
AE
2938 u64 names_size = 0;
2939 u32 want_count;
2940 int ret;
602adf40 2941
00f1f36f 2942 /*
4156d998
AE
2943 * The complete header will include an array of its 64-bit
2944 * snapshot ids, followed by the names of those snapshots as
2945 * a contiguous block of NUL-terminated strings. Note that
2946 * the number of snapshots could change by the time we read
2947 * it in, in which case we re-read it.
00f1f36f 2948 */
4156d998
AE
2949 do {
2950 size_t size;
2951
2952 kfree(ondisk);
2953
2954 size = sizeof (*ondisk);
2955 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2956 size += names_size;
2957 ondisk = kmalloc(size, GFP_KERNEL);
2958 if (!ondisk)
2959 return ERR_PTR(-ENOMEM);
2960
788e2df3 2961 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
80ef15bf 2962 0, size, ondisk, version);
4156d998
AE
2963 if (ret < 0)
2964 goto out_err;
2965 if (WARN_ON((size_t) ret < size)) {
2966 ret = -ENXIO;
06ecc6cb
AE
2967 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
2968 size, ret);
4156d998
AE
2969 goto out_err;
2970 }
2971 if (!rbd_dev_ondisk_valid(ondisk)) {
2972 ret = -ENXIO;
06ecc6cb 2973 rbd_warn(rbd_dev, "invalid header");
4156d998 2974 goto out_err;
81e759fb 2975 }
602adf40 2976
4156d998
AE
2977 names_size = le64_to_cpu(ondisk->snap_names_len);
2978 want_count = snap_count;
2979 snap_count = le32_to_cpu(ondisk->snap_count);
2980 } while (snap_count != want_count);
00f1f36f 2981
4156d998 2982 return ondisk;
00f1f36f 2983
4156d998
AE
2984out_err:
2985 kfree(ondisk);
2986
2987 return ERR_PTR(ret);
2988}
2989
2990/*
2991 * Re-read the on-disk header and convert it to the in-memory format.
2992 */
2993static int rbd_read_header(struct rbd_device *rbd_dev,
2994 struct rbd_image_header *header)
2995{
2996 struct rbd_image_header_ondisk *ondisk;
2997 u64 ver = 0;
2998 int ret;
602adf40 2999
4156d998
AE
3000 ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
3001 if (IS_ERR(ondisk))
3002 return PTR_ERR(ondisk);
3003 ret = rbd_header_from_disk(header, ondisk);
3004 if (ret >= 0)
3005 header->obj_version = ver;
3006 kfree(ondisk);
3007
3008 return ret;
602adf40
YS
3009}
3010
41f38c2b 3011static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
dfc5606d
YS
3012{
3013 struct rbd_snap *snap;
a0593290 3014 struct rbd_snap *next;
dfc5606d 3015
6087b51b
AE
3016 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node) {
3017 list_del(&snap->node);
3018 rbd_snap_destroy(snap);
3019 }
dfc5606d
YS
3020}
3021
9478554a
AE
3022static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
3023{
3024 sector_t size;
3025
0d7dbfce 3026 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
9478554a
AE
3027 return;
3028
3029 size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
3030 dout("setting size to %llu sectors", (unsigned long long) size);
3031 rbd_dev->mapping.size = (u64) size;
3032 set_capacity(rbd_dev->disk, size);
3033}
3034
602adf40
YS
3035/*
3036 * only read the first part of the ondisk header, without the snaps info
3037 */
117973fb 3038static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
602adf40
YS
3039{
3040 int ret;
3041 struct rbd_image_header h;
602adf40
YS
3042
3043 ret = rbd_read_header(rbd_dev, &h);
3044 if (ret < 0)
3045 return ret;
3046
a51aa0c0
JD
3047 down_write(&rbd_dev->header_rwsem);
3048
9478554a
AE
3049 /* Update image size, and check for resize of mapped image */
3050 rbd_dev->header.image_size = h.image_size;
3051 rbd_update_mapping_size(rbd_dev);
9db4b3e3 3052
849b4260 3053 /* rbd_dev->header.object_prefix shouldn't change */
602adf40 3054 kfree(rbd_dev->header.snap_sizes);
849b4260 3055 kfree(rbd_dev->header.snap_names);
d1d25646
JD
3056 /* osd requests may still refer to snapc */
3057 ceph_put_snap_context(rbd_dev->header.snapc);
602adf40 3058
b813623a
AE
3059 if (hver)
3060 *hver = h.obj_version;
a71b891b 3061 rbd_dev->header.obj_version = h.obj_version;
93a24e08 3062 rbd_dev->header.image_size = h.image_size;
602adf40
YS
3063 rbd_dev->header.snapc = h.snapc;
3064 rbd_dev->header.snap_names = h.snap_names;
3065 rbd_dev->header.snap_sizes = h.snap_sizes;
849b4260
AE
3066 /* Free the extra copy of the object prefix */
3067 WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
3068 kfree(h.object_prefix);
3069
304f6808 3070 ret = rbd_dev_snaps_update(rbd_dev);
dfc5606d 3071
c666601a 3072 up_write(&rbd_dev->header_rwsem);
602adf40 3073
dfc5606d 3074 return ret;
602adf40
YS
3075}
3076
117973fb 3077static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
1fe5e993
AE
3078{
3079 int ret;
3080
117973fb 3081 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1fe5e993 3082 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
117973fb
AE
3083 if (rbd_dev->image_format == 1)
3084 ret = rbd_dev_v1_refresh(rbd_dev, hver);
3085 else
3086 ret = rbd_dev_v2_refresh(rbd_dev, hver);
1fe5e993 3087 mutex_unlock(&ctl_mutex);
d98df63e 3088 revalidate_disk(rbd_dev->disk);
522a0cc0
AE
3089 if (ret)
3090 rbd_warn(rbd_dev, "got notification but failed to "
3091 " update snaps: %d\n", ret);
1fe5e993
AE
3092
3093 return ret;
3094}
3095
602adf40
YS
3096static int rbd_init_disk(struct rbd_device *rbd_dev)
3097{
3098 struct gendisk *disk;
3099 struct request_queue *q;
593a9e7b 3100 u64 segment_size;
602adf40 3101
602adf40 3102 /* create gendisk info */
602adf40
YS
3103 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3104 if (!disk)
1fcdb8aa 3105 return -ENOMEM;
602adf40 3106
f0f8cef5 3107 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
de71a297 3108 rbd_dev->dev_id);
602adf40
YS
3109 disk->major = rbd_dev->major;
3110 disk->first_minor = 0;
3111 disk->fops = &rbd_bd_ops;
3112 disk->private_data = rbd_dev;
3113
bf0d5f50 3114 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
602adf40
YS
3115 if (!q)
3116 goto out_disk;
029bcbd8 3117
593a9e7b
AE
3118 /* We use the default size, but let's be explicit about it. */
3119 blk_queue_physical_block_size(q, SECTOR_SIZE);
3120
029bcbd8 3121 /* set io sizes to object size */
593a9e7b
AE
3122 segment_size = rbd_obj_bytes(&rbd_dev->header);
3123 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3124 blk_queue_max_segment_size(q, segment_size);
3125 blk_queue_io_min(q, segment_size);
3126 blk_queue_io_opt(q, segment_size);
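 /* With the default 4MB objects the settings above allow, for
  * example, up to 8192 512-byte sectors (one full object) per
  * request. */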
029bcbd8 3127
602adf40
YS
3128 blk_queue_merge_bvec(q, rbd_merge_bvec);
3129 disk->queue = q;
3130
3131 q->queuedata = rbd_dev;
3132
3133 rbd_dev->disk = disk;
602adf40 3134
12f02944
AE
3135 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
3136
602adf40 3137 return 0;
602adf40
YS
3138out_disk:
3139 put_disk(disk);
1fcdb8aa
AE
3140
3141 return -ENOMEM;
602adf40
YS
3142}
3143
dfc5606d
YS
3144/*
3145 sysfs
3146*/
3147
593a9e7b
AE
3148static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3149{
3150 return container_of(dev, struct rbd_device, dev);
3151}
3152
dfc5606d
YS
3153static ssize_t rbd_size_show(struct device *dev,
3154 struct device_attribute *attr, char *buf)
3155{
593a9e7b 3156 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
a51aa0c0
JD
3157 sector_t size;
3158
3159 down_read(&rbd_dev->header_rwsem);
3160 size = get_capacity(rbd_dev->disk);
3161 up_read(&rbd_dev->header_rwsem);
dfc5606d 3162
a51aa0c0 3163 return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
dfc5606d
YS
3164}
3165
34b13184
AE
3166/*
3167 * Note this shows the features for whatever's mapped, which is not
3168 * necessarily the base image.
3169 */
3170static ssize_t rbd_features_show(struct device *dev,
3171 struct device_attribute *attr, char *buf)
3172{
3173 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3174
3175 return sprintf(buf, "0x%016llx\n",
3176 (unsigned long long) rbd_dev->mapping.features);
3177}
3178
dfc5606d
YS
3179static ssize_t rbd_major_show(struct device *dev,
3180 struct device_attribute *attr, char *buf)
3181{
593a9e7b 3182 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf40 3183
dfc5606d
YS
3184 return sprintf(buf, "%d\n", rbd_dev->major);
3185}
3186
3187static ssize_t rbd_client_id_show(struct device *dev,
3188 struct device_attribute *attr, char *buf)
602adf40 3189{
593a9e7b 3190 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3191
1dbb4399
AE
3192 return sprintf(buf, "client%lld\n",
3193 ceph_client_id(rbd_dev->rbd_client->client));
602adf40
YS
3194}
3195
dfc5606d
YS
3196static ssize_t rbd_pool_show(struct device *dev,
3197 struct device_attribute *attr, char *buf)
602adf40 3198{
593a9e7b 3199 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3200
0d7dbfce 3201 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
dfc5606d
YS
3202}
3203
9bb2f334
AE
3204static ssize_t rbd_pool_id_show(struct device *dev,
3205 struct device_attribute *attr, char *buf)
3206{
3207 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3208
0d7dbfce
AE
3209 return sprintf(buf, "%llu\n",
3210 (unsigned long long) rbd_dev->spec->pool_id);
9bb2f334
AE
3211}
3212
dfc5606d
YS
3213static ssize_t rbd_name_show(struct device *dev,
3214 struct device_attribute *attr, char *buf)
3215{
593a9e7b 3216 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3217
a92ffdf8
AE
3218 if (rbd_dev->spec->image_name)
3219 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3220
3221 return sprintf(buf, "(unknown)\n");
dfc5606d
YS
3222}
3223
589d30e0
AE
3224static ssize_t rbd_image_id_show(struct device *dev,
3225 struct device_attribute *attr, char *buf)
3226{
3227 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3228
0d7dbfce 3229 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
589d30e0
AE
3230}
3231
34b13184
AE
3232/*
3233 * Shows the name of the currently-mapped snapshot (or
3234 * RBD_SNAP_HEAD_NAME for the base image).
3235 */
dfc5606d
YS
3236static ssize_t rbd_snap_show(struct device *dev,
3237 struct device_attribute *attr,
3238 char *buf)
3239{
593a9e7b 3240 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 3241
0d7dbfce 3242 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
dfc5606d
YS
3243}
3244
86b00e0d
AE
3245/*
3246 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3247 * for the parent image. If there is no parent, simply shows
3248 * "(no parent image)".
3249 */
3250static ssize_t rbd_parent_show(struct device *dev,
3251 struct device_attribute *attr,
3252 char *buf)
3253{
3254 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3255 struct rbd_spec *spec = rbd_dev->parent_spec;
3256 int count;
3257 char *bufp = buf;
3258
3259 if (!spec)
3260 return sprintf(buf, "(no parent image)\n");
3261
3262 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3263 (unsigned long long) spec->pool_id, spec->pool_name);
3264 if (count < 0)
3265 return count;
3266 bufp += count;
3267
3268 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3269 spec->image_name ? spec->image_name : "(unknown)");
3270 if (count < 0)
3271 return count;
3272 bufp += count;
3273
3274 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3275 (unsigned long long) spec->snap_id, spec->snap_name);
3276 if (count < 0)
3277 return count;
3278 bufp += count;
3279
3280 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3281 if (count < 0)
3282 return count;
3283 bufp += count;
3284
3285 return (ssize_t) (bufp - buf);
3286}
3287
dfc5606d
YS
3288static ssize_t rbd_image_refresh(struct device *dev,
3289 struct device_attribute *attr,
3290 const char *buf,
3291 size_t size)
3292{
593a9e7b 3293 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
b813623a 3294 int ret;
602adf40 3295
117973fb 3296 ret = rbd_dev_refresh(rbd_dev, NULL);
b813623a
AE
3297
3298 return ret < 0 ? ret : size;
dfc5606d 3299}
602adf40 3300
dfc5606d 3301static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
34b13184 3302static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
dfc5606d
YS
3303static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3304static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3305static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
9bb2f334 3306static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
dfc5606d 3307static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
589d30e0 3308static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
dfc5606d
YS
3309static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3310static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
86b00e0d 3311static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
dfc5606d
YS
3312
3313static struct attribute *rbd_attrs[] = {
3314 &dev_attr_size.attr,
34b13184 3315 &dev_attr_features.attr,
dfc5606d
YS
3316 &dev_attr_major.attr,
3317 &dev_attr_client_id.attr,
3318 &dev_attr_pool.attr,
9bb2f334 3319 &dev_attr_pool_id.attr,
dfc5606d 3320 &dev_attr_name.attr,
589d30e0 3321 &dev_attr_image_id.attr,
dfc5606d 3322 &dev_attr_current_snap.attr,
86b00e0d 3323 &dev_attr_parent.attr,
dfc5606d 3324 &dev_attr_refresh.attr,
dfc5606d
YS
3325 NULL
3326};
3327
3328static struct attribute_group rbd_attr_group = {
3329 .attrs = rbd_attrs,
3330};
3331
3332static const struct attribute_group *rbd_attr_groups[] = {
3333 &rbd_attr_group,
3334 NULL
3335};
3336
3337static void rbd_sysfs_dev_release(struct device *dev)
3338{
3339}
3340
3341static struct device_type rbd_device_type = {
3342 .name = "rbd",
3343 .groups = rbd_attr_groups,
3344 .release = rbd_sysfs_dev_release,
3345};
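/*
 * These attributes appear under /sys/bus/rbd/devices/<id>/; for
 * example, reading /sys/bus/rbd/devices/0/size reports the size of
 * the mapped image (or snapshot) in bytes.
 */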
3346
8b8fb99c
AE
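/*
 * An rbd_spec holds the names and ids identifying an image: pool,
 * image, and snapshot.  It is reference counted so it can be shared
 * (for example with a parent device); rbd_spec_put() frees it when
 * the last reference is dropped.
 */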
3347static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3348{
3349 kref_get(&spec->kref);
3350
3351 return spec;
3352}
3353
3354static void rbd_spec_free(struct kref *kref);
3355static void rbd_spec_put(struct rbd_spec *spec)
3356{
3357 if (spec)
3358 kref_put(&spec->kref, rbd_spec_free);
3359}
3360
3361static struct rbd_spec *rbd_spec_alloc(void)
3362{
3363 struct rbd_spec *spec;
3364
3365 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3366 if (!spec)
3367 return NULL;
3368 kref_init(&spec->kref);
3369
8b8fb99c
AE
3370 return spec;
3371}
3372
3373static void rbd_spec_free(struct kref *kref)
3374{
3375 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3376
3377 kfree(spec->pool_name);
3378 kfree(spec->image_id);
3379 kfree(spec->image_name);
3380 kfree(spec->snap_name);
3381 kfree(spec);
3382}
3383
cc344fa1 3384static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
c53d5893
AE
3385 struct rbd_spec *spec)
3386{
3387 struct rbd_device *rbd_dev;
3388
3389 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3390 if (!rbd_dev)
3391 return NULL;
3392
3393 spin_lock_init(&rbd_dev->lock);
6d292906 3394 rbd_dev->flags = 0;
c53d5893
AE
3395 INIT_LIST_HEAD(&rbd_dev->node);
3396 INIT_LIST_HEAD(&rbd_dev->snaps);
3397 init_rwsem(&rbd_dev->header_rwsem);
3398
3399 rbd_dev->spec = spec;
3400 rbd_dev->rbd_client = rbdc;
3401
0903e875
AE
3402 /* Initialize the layout used for all rbd requests */
3403
3404 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3405 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3406 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3407 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3408
c53d5893
AE
3409 return rbd_dev;
3410}
3411
3412static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3413{
86b00e0d 3414 rbd_spec_put(rbd_dev->parent_spec);
c53d5893
AE
3415 kfree(rbd_dev->header_name);
3416 rbd_put_client(rbd_dev->rbd_client);
3417 rbd_spec_put(rbd_dev->spec);
3418 kfree(rbd_dev);
3419}
3420
6087b51b 3421static void rbd_snap_destroy(struct rbd_snap *snap)
dfc5606d 3422{
3e83b65b
AE
3423 kfree(snap->name);
3424 kfree(snap);
dfc5606d
YS
3425}
3426
6087b51b 3427static struct rbd_snap *rbd_snap_create(struct rbd_device *rbd_dev,
c8d18425 3428 const char *snap_name,
34b13184
AE
3429 u64 snap_id, u64 snap_size,
3430 u64 snap_features)
dfc5606d 3431{
4e891e0a 3432 struct rbd_snap *snap;
4e891e0a
AE
3433
3434 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
dfc5606d 3435 if (!snap)
4e891e0a
AE
3436 return ERR_PTR(-ENOMEM);
3437
6e584f52 3438 snap->name = snap_name;
c8d18425
AE
3439 snap->id = snap_id;
3440 snap->size = snap_size;
34b13184 3441 snap->features = snap_features;
4e891e0a
AE
3442
3443 return snap;
dfc5606d
YS
3444}

/*
 * Returns a dynamically-allocated snapshot name if successful, or a
 * pointer-coded error otherwise.
 */
static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
					u64 *snap_size, u64 *snap_features)
{
	char *snap_name;
	int i;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	for (i = 0; i < which; i++)
		snap_name += strlen(snap_name) + 1;

	snap_name = kstrdup(snap_name, GFP_KERNEL);
	if (!snap_name)
		return ERR_PTR(-ENOMEM);

	*snap_size = rbd_dev->header.snap_sizes[which];
	*snap_features = 0;	/* No features for v1 */

	return snap_name;
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf), NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order)
		*order = size_buf.order;
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
		(unsigned long long)snap_id, (unsigned int)size_buf.order,
		(unsigned long long)*snap_size);

	return 0;
}
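
/*
 * For reference, the "get_size" class method replies with exactly
 * the packed layout of size_buf above: a one-byte object order
 * followed by a little-endian 64-bit size.  As a worked example, a
 * 1 GiB image with 4 MiB objects (order 22) would arrive as the
 * nine bytes
 *
 *	16  00 00 00 40 00 00 00 00
 *
 * i.e. order 0x16 and le64 size 0x0000000040000000.
 */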

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf), NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	char *image_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(CEPH_NOSNAP);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
	if (parent_spec->pool_id == CEPH_NOPOOL)
		goto out;	/* No parent?  No problem. */

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (WARN_ON(parent_spec->pool_id > (u64)U32_MAX))
		goto out_err;

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	parent_spec->image_id = image_id;
	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	rbd_dev->parent_overlap = overlap;
	rbd_dev->parent_spec = parent_spec;
	parent_spec = NULL;	/* rbd_dev now owns this */
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
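
/*
 * Sketch of the "get_parent" reply as decoded above (all integers
 * little-endian):
 *
 *	le64 pool_id	CEPH_NOPOOL means the image has no parent
 *	le32 len, then len bytes of the parent's image_id
 *	le64 snap_id	parent snapshot the clone was made from
 *	le64 overlap	bytes of the child still backed by the parent
 */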

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->stripe_unit = stripe_unit;
	rbd_dev->stripe_count = stripe_count;

	return 0;
}
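
/*
 * Example of the only accepted ("default") striping, assuming the
 * customary object order of 22 (4 MiB objects):
 *
 *	stripe_unit  = 4194304	(equal to the object size)
 *	stripe_count = 1
 *
 * Anything else is real STRIPINGV2 fan-out, which this driver
 * cannot service yet, so such a map attempt is refused with
 * -EINVAL.
 */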

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size, NULL);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

/*
 * When a parent image gets probed, we only have the pool, image,
 * and snapshot ids but not the names of any of them.  This call
 * is made later to fill in those names.  It has to be done after
 * rbd_dev_snaps_update() has completed because some of the
 * information (in particular, snapshot name) is not available
 * until then.
 */
static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc;
	const char *name;
	void *reply_buf = NULL;
	int ret;

	if (rbd_dev->spec->pool_name)
		return 0;	/* Already have the names */

	/* Look up the pool name */

	osdc = &rbd_dev->rbd_client->client->osdc;
	name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
	if (!name) {
		rbd_warn(rbd_dev, "there is no pool with id %llu",
			rbd_dev->spec->pool_id);	/* Really a BUG() */
		return -EIO;
	}

	rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
	if (!rbd_dev->spec->pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	name = rbd_dev_image_name(rbd_dev);
	if (name)
		rbd_dev->spec->image_name = (char *)name;
	else
		rbd_warn(rbd_dev, "unable to get image name");

	/* Look up the snapshot name. */

	name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
	if (!name) {
		rbd_warn(rbd_dev, "no snapshot with id %llu",
			rbd_dev->spec->snap_id);	/* Really a BUG() */
		ret = -EIO;
		goto out_err;
	}
	rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
	if (!rbd_dev->spec->snap_name) {
		ret = -ENOMEM;
		goto out_err;
	}

	return 0;
out_err:
	kfree(reply_buf);
	kfree(rbd_dev->spec->pool_name);
	rbd_dev->spec->pool_name = NULL;

	return ret;
}

static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size, ver);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;

	size = sizeof (struct ceph_snap_context) +
			snap_count * sizeof (snapc->snaps[0]);
	snapc = kmalloc(size, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	ret = 0;

	atomic_set(&snapc->nref, 1);
	snapc->seq = seq;
	snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	rbd_dev->header.snapc = snapc;

	dout(" snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
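
/*
 * The "get_snapcontext" reply decoded above has this shape:
 *
 *	le64 seq		highest snapshot id ever issued
 *	le32 snap_count
 *	le64 snaps[snap_count]	snapshot ids, highest id first
 *
 * so a context with two snapshots (ids 5 and 3, seq 5) arrives as
 * the byte stream  05 00.. | 02 00 00 00 | 05 00.. | 03 00..
 */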

static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	size_t size;
	void *reply_buf;
	__le64 snap_id;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);
	snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snap_id, sizeof (snap_id),
				reply_buf, size, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)le64_to_cpu(snap_id), snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
		u64 *snap_size, u64 *snap_features)
{
	u64 snap_id;
	u64 size;
	u64 features;
	char *snap_name;
	int ret;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);
	snap_id = rbd_dev->header.snapc->snaps[which];
	ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
	if (ret)
		goto out_err;

	ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		goto out_err;

	snap_name = rbd_dev_v2_snap_name(rbd_dev, which);
	if (!IS_ERR(snap_name)) {
		*snap_size = size;
		*snap_features = features;
	}

	return snap_name;
out_err:
	return ERR_PTR(ret);
}

static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
		u64 *snap_size, u64 *snap_features)
{
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_info(rbd_dev, which,
					snap_size, snap_features);
	if (rbd_dev->image_format == 2)
		return rbd_dev_v2_snap_info(rbd_dev, which,
					snap_size, snap_features);
	return ERR_PTR(-EINVAL);
}

static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;
	__u8 obj_order;

	down_write(&rbd_dev->header_rwsem);

	/* Grab old order first, to see if it changes */

	obj_order = rbd_dev->header.obj_order;
	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out;
	if (rbd_dev->header.obj_order != obj_order) {
		ret = -EIO;
		goto out;
	}
	rbd_update_mapping_size(rbd_dev);

	ret = rbd_dev_v2_snap_context(rbd_dev, hver);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);
	if (ret)
		goto out;
	ret = rbd_dev_snaps_update(rbd_dev);
	dout("rbd_dev_snaps_update returned %d\n", ret);
out:
	up_write(&rbd_dev->header_rwsem);

	return ret;
}

/*
 * Scan the rbd device's current snapshot list and compare it to the
 * newly-received snapshot context.  Remove any existing snapshots
 * not present in the new snapshot context.  Add a new snapshot for
 * any snapshots in the snapshot context not in the current list.
 * And verify there are no changes to snapshots we already know
 * about.
 *
 * Assumes the snapshots in the snapshot context are sorted by
 * snapshot id, highest id first.  (Snapshots in the rbd_dev's list
 * are also maintained in that order.)
 *
 * Note that any error that occurs while updating the snapshot list
 * aborts the update, and the entire list is cleared.  The snapshot
 * list becomes inconsistent at that point anyway, so it might as
 * well be empty.
 */
static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const u32 snap_count = snapc->num_snaps;
	struct list_head *head = &rbd_dev->snaps;
	struct list_head *links = head->next;
	u32 index = 0;
	int ret = 0;

	dout("%s: snap count is %u\n", __func__, (unsigned int)snap_count);
	while (index < snap_count || links != head) {
		u64 snap_id;
		struct rbd_snap *snap;
		char *snap_name;
		u64 snap_size = 0;
		u64 snap_features = 0;

		snap_id = index < snap_count ? snapc->snaps[index]
					     : CEPH_NOSNAP;
		snap = links != head ? list_entry(links, struct rbd_snap, node)
				     : NULL;
		rbd_assert(!snap || snap->id != CEPH_NOSNAP);

		if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
			struct list_head *next = links->next;

			/*
			 * A previously-existing snapshot is not in
			 * the new snap context.
			 *
			 * If the now-missing snapshot is the one
			 * the image represents, clear its existence
			 * flag so we can avoid sending any more
			 * requests to it.
			 */
			if (rbd_dev->spec->snap_id == snap->id)
				clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
			dout("removing %ssnap id %llu\n",
				rbd_dev->spec->snap_id == snap->id ?
							"mapped " : "",
				(unsigned long long)snap->id);

			list_del(&snap->node);
			rbd_snap_destroy(snap);

			/* Done with this list entry; advance */

			links = next;
			continue;
		}

		snap_name = rbd_dev_snap_info(rbd_dev, index,
					&snap_size, &snap_features);
		if (IS_ERR(snap_name)) {
			ret = PTR_ERR(snap_name);
			dout("failed to get snap info, error %d\n", ret);
			goto out_err;
		}

		dout("entry %u: snap_id = %llu\n", (unsigned int)index,
			(unsigned long long)snap_id);
		if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
			struct rbd_snap *new_snap;

			/* We haven't seen this snapshot before */

			new_snap = rbd_snap_create(rbd_dev, snap_name,
					snap_id, snap_size, snap_features);
			if (IS_ERR(new_snap)) {
				ret = PTR_ERR(new_snap);
				dout(" failed to add dev, error %d\n", ret);
				goto out_err;
			}

			/* New goes before existing, or at end of list */

			dout(" added dev%s\n", snap ? "" : " at end");
			if (snap)
				list_add_tail(&new_snap->node, &snap->node);
			else
				list_add_tail(&new_snap->node, head);
		} else {
			/* Already have this one */

			dout(" already present\n");

			rbd_assert(snap->size == snap_size);
			rbd_assert(!strcmp(snap->name, snap_name));
			rbd_assert(snap->features == snap_features);

			/* Our duplicate of the name is not needed */
			kfree(snap_name);

			/* Done with this list entry; advance */

			links = links->next;
		}

		/* Advance to the next entry in the snapshot context */

		index++;
	}
	dout("%s: done\n", __func__);

	return 0;
out_err:
	rbd_remove_all_snaps(rbd_dev);

	return ret;
}

static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	mutex_unlock(&ctl_mutex);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}

static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.  The minimum rbd id is 1.
 */
static void rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	struct list_head *tmp;
	int rbd_id = rbd_dev->dev_id;
	int max_id;

	rbd_assert(rbd_id > 0);

	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);

	/*
	 * If the id being "put" is not the current maximum, there
	 * is nothing special we need to do.
	 */
	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
		spin_unlock(&rbd_dev_list_lock);
		return;
	}

	/*
	 * We need to update the current maximum id.  Search the
	 * list to find out what it is.  We're more likely to find
	 * the maximum at the end, so search the list backward.
	 */
	max_id = 0;
	list_for_each_prev(tmp, &rbd_dev_list) {
		struct rbd_device *rbd_dev;

		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id > max_id)
			max_id = rbd_dev->dev_id;
	}
	spin_unlock(&rbd_dev_list_lock);

	/*
	 * The max id could have been updated by rbd_dev_id_get(), in
	 * which case it now accurately reflects the new maximum.
	 * Be careful not to overwrite the maximum value in that
	 * case.
	 */
	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
	dout(" max dev id has been reset\n");
}

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
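
/*
 * For example, with *buf pointing at "  pool image", next_token()
 * advances *buf to the 'p' of "pool" and returns 4; the caller
 * consumes those four bytes and calls next_token() again to land
 * on "image".
 */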

/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
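
/*
 * Usage sketch: given buf pointing at "rbd myimage", two
 * consecutive dup_token() calls return kmalloc'd copies "rbd"
 * (len 3) and "myimage" (len 7), leaving buf at the terminating
 * '\0'.  Each copy must eventually be released with kfree().
 */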

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_name>
 *      An optional snapshot name.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot name is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!spec->snap_name)
		goto out_mem;
	*(spec->snap_name + len) = '\0';

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
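
/*
 * Worked example (all values illustrative): writing
 *
 *	1.2.3.4:6789 name=admin,secret=AQB... rbd myimage mysnap
 *
 * to /sys/bus/rbd/add parses into mon_addrs "1.2.3.4:6789", the
 * ceph/rbd options string "name=admin,secret=AQB...",
 * spec->pool_name "rbd", spec->image_name "myimage" and
 * spec->snap_name "mysnap".  Omitting the final token maps the
 * image head, with spec->snap_name set to RBD_SNAP_HEAD_NAME ("-").
 */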

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
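
/*
 * Concretely (names illustrative, assuming the usual "rbd_id."
 * value of RBD_ID_PREFIX): probing image "myimage" invokes
 * "get_id" on object "rbd_id.myimage".  A reply such as the
 * encoded string "1014b5bb7d3eef" becomes the format 2 image id;
 * an -ENOENT reply means no id object exists, so the image is
 * taken to be format 1 and its id is recorded as the empty string.
 */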

static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;

	/* Record the header object name for this rbd image. */

	size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name) {
		ret = -ENOMEM;
		goto out_err;
	}
	sprintf(rbd_dev->header_name, "%s%s",
		rbd_dev->spec->image_name, RBD_SUFFIX);

	/* Populate rbd image metadata */

	ret = rbd_read_header(rbd_dev, &rbd_dev->header);
	if (ret < 0)
		goto out_err;

	/* Version 1 images have no parent (no layering) */

	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	dout("discovered version 1 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}

static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	u64 ver = 0;

	/*
	 * Image id was filled in by the caller.  Record the header
	 * object name for this rbd image.
	 */
	size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;
	sprintf(rbd_dev->header_name, "%s%s",
		RBD_HEADER_PREFIX, rbd_dev->spec->image_id);

	/* Get the size and object order for the image */

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out_err;

	/* Get the object prefix (a.k.a. block_name) for the image */

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/* Get and check the features for the image */

	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports layering, get the parent info */

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out_err;
		rbd_warn(rbd_dev, "WARNING: kernel support for "
				"layered rbd images is EXPERIMENTAL!");
	}

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* crypto and compression type aren't (yet) supported for v2 images */

	rbd_dev->header.crypt_type = 0;
	rbd_dev->header.comp_type = 0;

	/* Get the snapshot context, plus the header version */

	ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
	if (ret)
		goto out_err;
	rbd_dev->header.obj_version = ver;

	dout("discovered version 2 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;
out_err:
	rbd_dev->parent_overlap = 0;
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
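
/*
 * Header object naming, for reference (ids illustrative, assuming
 * the conventional ".rbd" and "rbd_header." values of RBD_SUFFIX
 * and RBD_HEADER_PREFIX): a format 1 image "myimage" keeps its
 * metadata in object "myimage.rbd", while a format 2 image with id
 * "1014b5bb7d3eef" keeps it in "rbd_header.1014b5bb7d3eef".  That
 * is why v1 and v2 probing build header_name differently above.
 */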

static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec = NULL;
	struct rbd_client *rbdc = NULL;
	int ret;

	/* no need to lock here, as rbd_dev is not registered yet */
	ret = rbd_dev_snaps_update(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_probe_update_spec(rbd_dev);
	if (ret)
		goto err_out_snaps;

	ret = rbd_dev_set_mapping(rbd_dev);
	if (ret)
		goto err_out_snaps;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/*
	 * At this point cleanup in the event of an error is the job
	 * of the sysfs code (initiated by rbd_bus_del_dev()).
	 */
	/* Probe the parent if there is one */

	if (rbd_dev->parent_spec) {
		/*
		 * We need to pass a reference to the client and the
		 * parent spec when creating the parent rbd_dev.
		 * Images related by parent/child relationships
		 * always share both.
		 */
		parent_spec = rbd_spec_get(rbd_dev->parent_spec);
		rbdc = __rbd_get_client(rbd_dev->rbd_client);

		parent = rbd_dev_create(rbdc, parent_spec);
		if (!parent) {
			ret = -ENOMEM;
			goto err_out_spec;
		}
		rbdc = NULL;		/* parent now owns reference */
		parent_spec = NULL;	/* parent now owns reference */
		ret = rbd_dev_probe(parent);
		if (ret < 0)
			goto err_out_parent;
		rbd_dev->parent = parent;
	}

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto err_out_bus;

	/* Everything's ready.  Announce the disk to the world. */

	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_parent:
	rbd_dev_destroy(parent);
err_out_spec:
	rbd_spec_put(parent_spec);
	rbd_put_client(rbdc);
err_out_bus:
	/* this will also clean up rest of rbd_dev stuff */

	rbd_bus_del_dev(rbd_dev);

	return ret;
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
err_out_snaps:
	rbd_remove_all_snaps(rbd_dev);

	return ret;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret) {
		dout("probe failed, returning %d\n", ret);

		return ret;
	}

	ret = rbd_dev_probe_finish(rbd_dev);
	if (ret)
		rbd_header_free(&rbd_dev->header);

	return ret;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64) rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	return count;
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t) rc;
}
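
/*
 * End-to-end usage sketch (shell, values illustrative): mapping is
 * a single write to the bus control file, e.g.
 *
 *	# echo "1.2.3.4:6789 name=admin,secret=AQB... rbd myimage" \
 *		> /sys/bus/rbd/add
 *
 * On success the write returns count and a new /dev/rbd<id> block
 * device appears, along with its /sys/bus/rbd/devices/<id>/ entry;
 * on failure the write returns the negative errno produced above.
 */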

static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->watch_event)
		rbd_dev_header_watch_sync(rbd_dev, 0);

	/* clean up and free blkdev */
	rbd_free_disk(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);

	/* release allocated disk header fields */
	rbd_header_free(&rbd_dev->header);

	/* done with the id, and with the rbd_dev */
	rbd_dev_id_put(rbd_dev);
	rbd_assert(rbd_dev->rbd_client != NULL);
	rbd_dev_destroy(rbd_dev);

	/* release module ref */
	module_put(THIS_MODULE);
}

static void __rbd_remove(struct rbd_device *rbd_dev)
{
	rbd_remove_all_snaps(rbd_dev);
	rbd_bus_del_dev(rbd_dev);
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id, rc;
	unsigned long ul;
	int ret = count;

	rc = strict_strtoul(buf, 10, &ul);
	if (rc)
		return rc;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	while (rbd_dev->parent_spec) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		__rbd_remove(second);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
		first->parent_overlap = 0;
		first->parent = NULL;
	}
	__rbd_remove(rbd_dev);

done:
	mutex_unlock(&ctl_mutex);

	return ret;
}
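
/*
 * Companion to the rbd_add() sketch above: unmapping device id 2
 * is a write of that id to the bus control file,
 *
 *	# echo 2 > /sys/bus/rbd/remove
 *
 * which fails with -EBUSY while the block device is still open and
 * with -ENOENT if no mapped device has that id.
 */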

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");

		return -EINVAL;
	}
	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return 0;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");