rbd: remove old request handling code
linux-block.git: drivers/block/rbd.c
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
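
/*
 * Usage sketch (editorial note, not part of the original source):
 * these saturating helpers back rbd_dev->parent_ref further below.
 * A counter that has dropped to 0 stays at 0, so a racing get cannot
 * resurrect a torn-down parent:
 *
 *	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
 *	if (counter > 0)
 *		... parent data may safely be used ...
 */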

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)
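
/*
 * Worked value (editorial note): the bits above OR together to
 * 0x1 | 0x2 | 0x4 | 0x80 | 0x100 = 0x187, which is the value the
 * supported_features sysfs attribute below reports.
 */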

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *                       need copyup
 * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
 *        |     ^                              |
 *        v     \------------------------------/
 *      done
 *        ^
 *        |
 * RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_FLAT = 1,
	RBD_OBJ_WRITE_GUARD,
	RBD_OBJ_WRITE_COPYUP,
};
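
/*
 * Example walk-through (editorial sketch): a partial write to an
 * object of a clone typically starts in RBD_OBJ_WRITE_GUARD; if the
 * guarded write fails because the object doesn't exist yet, the
 * corresponding parent range is read and the request moves to
 * RBD_OBJ_WRITE_COPYUP, which writes the copied-up data together with
 * the new data and then completes.  An image with no parent skips all
 * of this and goes straight through RBD_OBJ_WRITE_FLAT.
 */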

struct rbd_obj_request {
	u64			object_no;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;
	union {
		bool			tried_parent;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * An object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	struct rbd_img_request	*img_request;
	u64			img_offset;
	/* links for img_request->obj_requests list */
	struct list_head	links;
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64                     size;
	u64                     features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
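
/*
 * Worked example (editorial note): with RBD_SINGLE_MAJOR_PART_SHIFT
 * of 4, dev_id 3 maps to minor 48, and minors 48..63 (the whole
 * device plus up to 15 partitions) all map back to dev_id 3.
 */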

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	case Opt_lock_on_read:
		rbd_opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		rbd_opts->exclusive = true;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
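
/*
 * Example (editorial note): a map option string such as
 * "queue_depth=128,lock_on_read" is tokenized by libceph and each
 * token handed to this callback, yielding rbd_opts->queue_depth = 128
 * and rbd_opts->lock_on_read = true; an unrecognized token fails the
 * map with -EINVAL.
 */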

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
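
/*
 * Example (editorial note): for a snaps[] array of { 12, 7, 3 }
 * (descending, as the OSDs keep it), bsearch() with this comparator
 * locates snap_id 7 at index 1.  rbd_dev_snap_index() below turns
 * that into the u32 index, or BAD_SNAP_INDEX when the id is absent.
 */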

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
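
/*
 * Worked example (editorial note): with the default 4 MiB objects
 * (obj_order 22), an image I/O at offset 6 MiB for 4 MiB starts 2 MiB
 * into its object (rbd_segment_offset) and rbd_segment_length() clips
 * it to the 2 MiB remaining in that object; the remainder is issued
 * against the next object.  Object sizes are powers of two, so the
 * mask arithmetic is exact.
 */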

static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	switch (obj_req->type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		rbd_assert(0);
	}
}

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		return true;
	default:
		return false;
	}
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request);

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->object_no, obj_request->offset,
	     obj_request->length, osd_req);
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->offset &&
	       obj_req->length == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->offset + obj_req->length ==
	       rbd_dev->layout.object_size;
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (rbd_img_request_op_type(img_req)) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
		return true;
	default:
		rbd_assert(0);
	}
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	obj_request->callback(obj_request);
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);
	rbd_assert(osd_req == obj_req->osd_req);

	obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
	if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
		obj_req->xferred = osd_req->r_result;
	else
		/*
		 * Writes aren't allowed to return a data payload.  In some
		 * guarded write cases (e.g. stat + zero on an empty object)
		 * a stat response makes it through, but we don't care.
		 */
		obj_req->xferred = 0;

	rbd_obj_handle_request(obj_req);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	ktime_get_real_ts(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->offset;
}

static struct ceph_osd_request *
__rbd_osd_req_create(struct rbd_device *rbd_dev,
		     struct ceph_snap_context *snapc,
		     int num_ops, unsigned int flags,
		     struct rbd_obj_request *obj_request)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_flags = flags;
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_request;

	req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_request->object_no))
		goto err_req;

	if (ceph_osdc_alloc_messages(req, GFP_NOIO))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}
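
/*
 * Editorial note: RBD_V1_DATA_FORMAT and RBD_V2_DATA_FORMAT are
 * printf-style formats defined in rbd_types.h (assumed here to be
 * "%s.%012llx" and "%s.%016llx" respectively), so a format 2 data
 * object ends up with a name like
 * "rbd_data.<image id>.0000000000000001".
 */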
1676
bf0d5f50
AE
1677static struct ceph_osd_request *rbd_osd_req_create(
1678 struct rbd_device *rbd_dev,
6d2940c8 1679 enum obj_operation_type op_type,
deb236b3 1680 unsigned int num_ops,
430c28c3 1681 struct rbd_obj_request *obj_request)
bf0d5f50 1682{
bf0d5f50 1683 struct ceph_snap_context *snapc = NULL;
bf0d5f50 1684
90e98c52
GZ
1685 if (obj_request_img_data_test(obj_request) &&
1686 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
6365d33a 1687 struct rbd_img_request *img_request = obj_request->img_request;
90e98c52
GZ
1688 if (op_type == OBJ_OP_WRITE) {
1689 rbd_assert(img_request_write_test(img_request));
1690 } else {
1691 rbd_assert(img_request_discard_test(img_request));
1692 }
6d2940c8 1693 snapc = img_request->snapc;
bf0d5f50
AE
1694 }
1695
bc81207e
ID
1696 return __rbd_osd_req_create(rbd_dev, snapc, num_ops,
1697 (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) ?
54ea0046 1698 CEPH_OSD_FLAG_WRITE : CEPH_OSD_FLAG_READ, obj_request);
bf0d5f50
AE
1699}
1700
1701static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1702{
1703 ceph_osdc_put_request(osd_req);
1704}

static struct rbd_obj_request *
rbd_obj_request_create(enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(obj_request_type_valid(type));

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	default:
		rbd_assert(0);
	}

	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	spin_lock_init(&img_request->completion_lock);
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
	    img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
		/*
		 * Need to end I/O on the entire obj_request worth of
		 * bytes in case of error.
		 */
		xferred = obj_request->length;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		blk_status_t status = errno_to_blk_status(result);

		rbd_assert(img_request->rq != NULL);

		more = blk_update_request(img_request->rq, status, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, status);
	}

	return more;
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}
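
/*
 * In-order completion illustration: with three object requests, if
 * obj 2 finishes first, next_completion stays at 0 and nothing is
 * ended yet; once obj 0 finishes, the loop above ends obj 0 (and
 * obj 1 and obj 2 too, if they are already done), so block-layer
 * completion is always delivered in object order.
 */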

static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
{
	switch (obj_req->type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->length);
		break;
	case OBJ_REQUEST_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->length);
		osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		rbd_assert(0);
	}
}

static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	obj_req->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, obj_req);
	if (!obj_req->osd_req)
		return -ENOMEM;

	osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->offset, obj_req->length, 0, 0);
	rbd_osd_req_setup_data(obj_req, 0);

	rbd_osd_req_format_read(obj_req);
	return 0;
}

static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
				unsigned int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}
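
/*
 * A minimal decode sketch for that reply, for illustration only (the
 * driver itself only cares whether the STAT op returns -ENOENT):
 *
 *	void *p = page_address(pages[0]);
 *	u64 length = le64_to_cpu(*(__le64 *)p);
 *	struct ceph_timespec *mtime = p + 8;  // le32 tv_sec, le32 tv_nsec
 */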

static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
				  unsigned int which)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
				   rbd_dev->layout.object_size,
				   rbd_dev->layout.object_size);

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(obj_req->osd_req, which, opcode,
			       obj_req->offset, obj_req->length, 0, 0);
	rbd_osd_req_setup_data(obj_req, which++);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	unsigned int num_osd_ops, which = 0;
	int ret;

	if (obj_request_overlaps_parent(obj_req)) {
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		num_osd_ops = 3; /* stat + setallochint + write/writefull */
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		num_osd_ops = 2; /* setallochint + write/writefull */
	}

	obj_req->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE,
					      num_osd_ops, obj_req);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (obj_request_overlaps_parent(obj_req)) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_write(obj_req, which);
	return 0;
}
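
/*
 * Resulting OSD op vectors, for reference:
 *
 *	guarded (overlaps parent): [stat, setallochint, write|writefull]
 *	flat (no parent overlap):  [setallochint, write|writefull]
 *
 * The stat op at index 0 is what surfaces -ENOENT for a nonexistent
 * child object, letting rbd_obj_handle_write() trigger a copyup.
 */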

static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
				    unsigned int which)
{
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_request_overlaps_parent(obj_req)) {
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else if (rbd_obj_is_tail(obj_req)) {
		opcode = CEPH_OSD_OP_TRUNCATE;
	} else {
		opcode = CEPH_OSD_OP_ZERO;
	}

	if (opcode)
		osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
				       obj_req->offset, obj_req->length,
				       0, 0);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	unsigned int num_osd_ops, which = 0;
	int ret;

	if (rbd_obj_is_entire(obj_req)) {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		num_osd_ops = 1; /* truncate/delete */
	} else {
		if (obj_request_overlaps_parent(obj_req)) {
			obj_req->write_state = RBD_OBJ_WRITE_GUARD;
			num_osd_ops = 2; /* stat + truncate/zero */
		} else {
			obj_req->write_state = RBD_OBJ_WRITE_FLAT;
			num_osd_ops = 1; /* truncate/zero */
		}
	}

	obj_req->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_DISCARD,
					      num_osd_ops, obj_req);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (!rbd_obj_is_entire(obj_req) &&
	    obj_request_overlaps_parent(obj_req)) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_discard(obj_req, which);
	return 0;
}
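
/*
 * Summary of the discard cases set up above:
 *
 *	entire object, overlaps parent:    truncate to 0
 *	entire object, no parent overlap:  delete
 *	tail of object:                    truncate
 *	middle of object:                  zero
 *
 * A delete is avoided when the object overlaps the parent: removing
 * the child object would make reads fall through to the parent and
 * expose pre-discard data again.
 */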

/*
 * For each object request in @img_req, allocate an OSD request, add
 * individual OSD ops and prepare them for submission.  The number of
 * OSD ops depends on op_type and the overlap point (if any).
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req;
	int ret;

	for_each_obj_request(img_req, obj_req) {
		switch (rbd_img_request_op_type(img_req)) {
		case OBJ_OP_READ:
			ret = rbd_obj_setup_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_setup_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_setup_discard(obj_req);
			break;
		default:
			rbd_assert(0);
		}
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
				enum obj_request_type type,
				void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	struct ceph_bio_iter bio_it;
	struct ceph_bvec_iter bvec_it;
	u64 img_offset;
	u64 resid;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
	     (int)type, data_desc);

	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_it = *(struct ceph_bio_iter *)data_desc;
		rbd_assert(img_offset ==
			   bio_it.iter.bi_sector << SECTOR_SHIFT);
	} else if (type == OBJ_REQUEST_BVECS) {
		bvec_it = *(struct ceph_bvec_iter *)data_desc;
	}

	while (resid) {
		u64 object_no = img_offset >> rbd_dev->header.obj_order;
		u64 offset = rbd_segment_offset(rbd_dev, img_offset);
		u64 length = rbd_segment_length(rbd_dev, img_offset, resid);

		obj_request = rbd_obj_request_create(type);
		if (!obj_request)
			goto out_unwind;

		obj_request->object_no = object_no;
		obj_request->offset = offset;
		obj_request->length = length;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			obj_request->bio_pos = bio_it;
			ceph_bio_iter_advance(&bio_it, length);
		} else if (type == OBJ_REQUEST_BVECS) {
			obj_request->bvec_pos = bvec_it;
			ceph_bvec_iter_shorten(&obj_request->bvec_pos, length);
			ceph_bvec_iter_advance(&bvec_it, length);
		}

		obj_request->callback = rbd_img_obj_callback;
		obj_request->img_offset = img_offset;

		img_offset += length;
		resid -= length;
	}

	return __rbd_img_fill_request(img_request);

out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}
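
/*
 * Worked example of the split above, assuming 4 MiB objects
 * (obj_order = 22): an 8 MiB request at image offset 6 MiB becomes
 *
 *	object_no 1: offset 2 MiB, length 2 MiB
 *	object_no 2: offset 0,     length 4 MiB
 *	object_no 3: offset 0,     length 2 MiB
 *
 * img_offset advances by each object request's length until resid
 * reaches zero.
 */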

static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;
	int ret = 0;

	dout("%s: img %p\n", __func__, img_request);

	rbd_img_request_get(img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		rbd_obj_request_submit(obj_request);
	}

	rbd_img_request_put(img_request);
	return ret;
}

static void rbd_img_end_child_request(struct rbd_img_request *img_req);

static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req,
				    u64 img_offset, u32 bytes)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = rbd_parent_request_create(obj_req, img_offset, bytes);
	if (!child_img_req)
		return -ENOMEM;

	child_img_req->callback = rbd_img_end_child_request;

	if (!rbd_img_is_write(img_req)) {
		switch (obj_req->type) {
		case OBJ_REQUEST_BIO:
			ret = rbd_img_request_fill(child_img_req,
						   OBJ_REQUEST_BIO,
						   &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
			ret = rbd_img_request_fill(child_img_req,
						   OBJ_REQUEST_BVECS,
						   &obj_req->bvec_pos);
			break;
		default:
			rbd_assert(0);
		}
	} else {
		struct ceph_bvec_iter it = {
			.bvecs = obj_req->copyup_bvecs,
			.iter = { .bi_size = bytes },
		};

		ret = rbd_img_request_fill(child_img_req, OBJ_REQUEST_BVECS,
					   &it);
	}
	if (ret) {
		rbd_img_request_put(child_img_req);
		return ret;
	}

	rbd_img_request_submit(child_img_req);
	return 0;
}

static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (obj_req->result == -ENOENT &&
	    obj_req->img_offset < rbd_dev->parent_overlap &&
	    !obj_req->tried_parent) {
		u64 obj_overlap = min(obj_req->length,
			rbd_dev->parent_overlap - obj_req->img_offset);

		obj_req->tried_parent = true;
		ret = rbd_obj_read_from_parent(obj_req, obj_req->img_offset,
					       obj_overlap);
		if (ret) {
			obj_req->result = ret;
			return true;
		}
		return false;
	}

	/*
	 * -ENOENT means a hole in the image -- zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  In both cases we update xferred
	 * count to indicate the whole request was satisfied.
	 */
	if (obj_req->result == -ENOENT ||
	    (!obj_req->result && obj_req->xferred < obj_req->length)) {
		rbd_assert(!obj_req->xferred || !obj_req->result);
		rbd_obj_zero_range(obj_req, obj_req->xferred,
				   obj_req->length - obj_req->xferred);
		obj_req->result = 0;
		obj_req->xferred = obj_req->length;
	}

	return true;
}

/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}

static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
	rbd_osd_req_destroy(obj_req->osd_req);

	/*
	 * Create a copyup request with the same number of OSD ops as
	 * the original request.  The original request was stat + op(s),
	 * the new copyup request will be copyup + the same op(s).
	 */
	obj_req->osd_req = rbd_osd_req_create(rbd_dev,
			rbd_img_request_op_type(obj_req->img_request),
			num_osd_ops, obj_req);
	if (!obj_req->osd_req)
		return -ENOMEM;

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
		dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
		bytes = 0;
	}

	osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd",
			    "copyup");
	osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
					  obj_req->copyup_bvecs, bytes);

	switch (rbd_img_request_op_type(obj_req->img_request)) {
	case OBJ_OP_WRITE:
		__rbd_obj_setup_write(obj_req, 1);
		break;
	case OBJ_OP_DISCARD:
		rbd_assert(!rbd_obj_is_entire(obj_req));
		__rbd_obj_setup_discard(obj_req, 1);
		break;
	default:
		rbd_assert(0);
	}

	rbd_obj_request_submit(obj_req);
	/* FIXME: in lieu of rbd_img_obj_callback() */
	rbd_img_request_put(obj_req->img_request);
	return 0;
}
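
/*
 * Op vector transformation performed above, for reference:
 *
 *	guarded write:   [stat, setallochint, write|writefull]
 *	copyup request:  [call rbd.copyup, setallochint, write|writefull]
 *
 *	guarded discard: [stat, truncate|zero]
 *	copyup request:  [call rbd.copyup, truncate|zero]
 *
 * The "copyup" class method populates the child object with the
 * parent data read earlier, and the trailing op(s) then apply the
 * original modification on top of it.
 */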

static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);

		obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (!obj_req->copyup_bvecs[i].bv_page)
			return -ENOMEM;

		obj_req->copyup_bvecs[i].bv_offset = 0;
		obj_req->copyup_bvecs[i].bv_len = len;
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}

static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 img_offset;
	u64 obj_overlap;
	int ret;

	if (!obj_request_overlaps_parent(obj_req)) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Use rbd_obj_issue_copyup()
		 * to re-submit the original write request -- the copyup
		 * operation itself will be a no-op, since someone must
		 * have populated the child object while we weren't
		 * looking.  Move to WRITE_FLAT state as we'll be done
		 * with the operation once the null copyup completes.
		 */
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		return rbd_obj_issue_copyup(obj_req, 0);
	}

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_req->img_offset - obj_req->offset;
	obj_overlap = rbd_dev->layout.object_size;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + obj_overlap > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		obj_overlap = rbd_dev->parent_overlap - img_offset;
	}

	ret = setup_copyup_bvecs(obj_req, obj_overlap);
	if (ret)
		return ret;

	obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
	return rbd_obj_read_from_parent(obj_req, img_offset, obj_overlap);
}

static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
{
	int ret;

again:
	switch (obj_req->write_state) {
	case RBD_OBJ_WRITE_GUARD:
		rbd_assert(!obj_req->xferred);
		if (obj_req->result == -ENOENT) {
			/*
			 * The target object doesn't exist.  Read the data for
			 * the entire target object up to the overlap point (if
			 * any) from the parent, so we can use it for a copyup.
			 */
			ret = rbd_obj_handle_write_guard(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
		/* fall through */
	case RBD_OBJ_WRITE_FLAT:
		if (!obj_req->result)
			/*
			 * There is no such thing as a successful short
			 * write -- indicate the whole request was satisfied.
			 */
			obj_req->xferred = obj_req->length;
		return true;
	case RBD_OBJ_WRITE_COPYUP:
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		if (obj_req->result)
			goto again;

		rbd_assert(obj_req->xferred);
		ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
		if (ret) {
			obj_req->result = ret;
			return true;
		}
		return false;
	default:
		rbd_assert(0);
	}
}
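
/*
 * The write state machine above, roughly:
 *
 *	RBD_OBJ_WRITE_FLAT:   write result arrives -> request complete
 *	RBD_OBJ_WRITE_GUARD:  -ENOENT from the stat guard -> read the
 *	                      affected range from the parent and switch
 *	                      to RBD_OBJ_WRITE_COPYUP; any other result
 *	                      completes the request
 *	RBD_OBJ_WRITE_COPYUP: parent read done -> resubmit as copyup +
 *	                      original op(s), whose result is handled
 *	                      as GUARD again
 *
 * A false return means another round trip is still in flight; true
 * means the object request is complete.
 */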

/*
 * Returns true if @obj_req is completed, or false otherwise.
 */
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	switch (rbd_img_request_op_type(obj_req->img_request)) {
	case OBJ_OP_READ:
		return rbd_obj_handle_read(obj_req);
	case OBJ_OP_WRITE:
		return rbd_obj_handle_write(obj_req);
	case OBJ_OP_DISCARD:
		if (rbd_obj_handle_write(obj_req)) {
			/*
			 * Hide -ENOENT from delete/truncate/zero -- discarding
			 * a non-existent object is not a problem.
			 */
			if (obj_req->result == -ENOENT) {
				obj_req->result = 0;
				obj_req->xferred = obj_req->length;
			}
			return true;
		}
		return false;
	default:
		rbd_assert(0);
	}
}

static void rbd_img_end_child_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req = img_req->obj_request;

	rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));

	obj_req->result = img_req->result;
	obj_req->xferred = img_req->xferred;
	rbd_img_request_put(img_req);

	rbd_obj_handle_request(obj_req);
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	if (!__rbd_obj_handle_request(obj_req))
		return;

	obj_request_done_set(obj_req);
	rbd_obj_request_complete(obj_req);
}

static const struct rbd_client_id rbd_empty_cid;

static bool rbd_cid_equal(const struct rbd_client_id *lhs,
			  const struct rbd_client_id *rhs)
{
	return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
}

static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
{
	struct rbd_client_id cid;

	mutex_lock(&rbd_dev->watch_mutex);
	cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
	cid.handle = rbd_dev->watch_cookie;
	mutex_unlock(&rbd_dev->watch_mutex);
	return cid;
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
			      const struct rbd_client_id *cid)
{
	dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
	     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
	     cid->gid, cid->handle);
	rbd_dev->owner_cid = *cid; /* struct */
}

static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
{
	mutex_lock(&rbd_dev->watch_mutex);
	sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
	mutex_unlock(&rbd_dev->watch_mutex);
}
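
/*
 * Assuming RBD_LOCK_COOKIE_PREFIX is "auto", a cookie formatted here
 * looks like "auto 94226392" -- the prefix plus the current watch
 * cookie -- which find_watcher() below parses back with sscanf() to
 * match a lock holder against a registered watch.
 */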

static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);

	strcpy(rbd_dev->lock_cookie, cookie);
	rbd_set_owner_cid(rbd_dev, &cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}

/*
 * lock_rwsem must be held for write
 */
static int rbd_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] != '\0');

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
			    RBD_LOCK_TAG, "", 0);
	if (ret)
		return ret;

	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
	__rbd_lock(rbd_dev, cookie);
	return 0;
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] == '\0');

	ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			      RBD_LOCK_NAME, rbd_dev->lock_cookie);
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock: %d", ret);

	/* treat errors as the image is unlocked */
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	rbd_dev->lock_cookie[0] = '\0';
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
}

static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
				enum rbd_notify_op notify_op,
				struct page ***preply_pages,
				size_t *preply_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
	int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
	char buf[buf_size];
	void *p = buf;

	dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);

	/* encode *LockPayload NotifyMessage (op + ClientId) */
	ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
	ceph_encode_32(&p, notify_op);
	ceph_encode_64(&p, cid.gid);
	ceph_encode_64(&p, cid.handle);

	return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, buf, buf_size,
				RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
}

static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
			       enum rbd_notify_op notify_op)
{
	struct page **reply_pages;
	size_t reply_len;

	__rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
}
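
/*
 * On-wire layout of the payload built above, assuming the usual
 * CEPH_ENCODING_START_BLK_LEN of 1 + 1 + 4 bytes:
 *
 *	u8   struct_v = 2
 *	u8   struct_compat = 1
 *	le32 payload_len = 4 + 8 + 8
 *	le32 notify_op
 *	le64 cid.gid
 *	le64 cid.handle
 */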

static void rbd_notify_acquired_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  acquired_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
}

static void rbd_notify_released_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  released_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
}

static int rbd_request_lock(struct rbd_device *rbd_dev)
{
	struct page **reply_pages;
	size_t reply_len;
	bool lock_owner_responded = false;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
				   &reply_pages, &reply_len);
	if (ret && ret != -ETIMEDOUT) {
		rbd_warn(rbd_dev, "failed to request lock: %d", ret);
		goto out;
	}

	if (reply_len > 0 && reply_len <= PAGE_SIZE) {
		void *p = page_address(reply_pages[0]);
		void *const end = p + reply_len;
		u32 n;

		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
		while (n--) {
			u8 struct_v;
			u32 len;

			ceph_decode_need(&p, end, 8 + 8, e_inval);
			p += 8 + 8; /* skip gid and cookie */

			ceph_decode_32_safe(&p, end, len, e_inval);
			if (!len)
				continue;

			if (lock_owner_responded) {
				rbd_warn(rbd_dev,
					 "duplicate lock owners detected");
				ret = -EIO;
				goto out;
			}

			lock_owner_responded = true;
			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
						  &struct_v, &len);
			if (ret) {
				rbd_warn(rbd_dev,
					 "failed to decode ResponseMessage: %d",
					 ret);
				goto e_inval;
			}

			ret = ceph_decode_32(&p);
		}
	}

	if (!lock_owner_responded) {
		rbd_warn(rbd_dev, "no lock owners detected");
		ret = -ETIMEDOUT;
	}

out:
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
{
	dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);

	cancel_delayed_work(&rbd_dev->lock_dwork);
	if (wake_all)
		wake_up_all(&rbd_dev->lock_waitq);
	else
		wake_up(&rbd_dev->lock_waitq);
}

static int get_lock_owner_info(struct rbd_device *rbd_dev,
			       struct ceph_locker **lockers, u32 *num_lockers)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	u8 lock_type;
	char *lock_tag;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
				 &lock_type, &lock_tag, lockers, num_lockers);
	if (ret)
		return ret;

	if (*num_lockers == 0) {
		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
		goto out;
	}

	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
			 lock_tag);
		ret = -EBUSY;
		goto out;
	}

	if (lock_type == CEPH_CLS_LOCK_SHARED) {
		rbd_warn(rbd_dev, "shared lock type detected");
		ret = -EBUSY;
		goto out;
	}

	if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
		    strlen(RBD_LOCK_COOKIE_PREFIX))) {
		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
			 (*lockers)[0].id.cookie);
		ret = -EBUSY;
		goto out;
	}

out:
	kfree(lock_tag);
	return ret;
}

static int find_watcher(struct rbd_device *rbd_dev,
			const struct ceph_locker *locker)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u64 cookie;
	int i;
	int ret;

	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
				      &rbd_dev->header_oloc, &watchers,
				      &num_watchers);
	if (ret)
		return ret;

	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
	for (i = 0; i < num_watchers; i++) {
		if (!memcmp(&watchers[i].addr, &locker->info.addr,
			    sizeof(locker->info.addr)) &&
		    watchers[i].cookie == cookie) {
			struct rbd_client_id cid = {
				.gid = le64_to_cpu(watchers[i].name.num),
				.handle = cookie,
			};

			dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
			     rbd_dev, cid.gid, cid.handle);
			rbd_set_owner_cid(rbd_dev, &cid);
			ret = 1;
			goto out;
		}
	}

	dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
	ret = 0;
out:
	kfree(watchers);
	return ret;
}

/*
 * lock_rwsem must be held for write
 */
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
	struct ceph_client *client = rbd_dev->rbd_client->client;
	struct ceph_locker *lockers;
	u32 num_lockers;
	int ret;

	for (;;) {
		ret = rbd_lock(rbd_dev);
		if (ret != -EBUSY)
			return ret;

		/* determine if the current lock holder is still alive */
		ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
		if (ret)
			return ret;

		if (num_lockers == 0)
			goto again;

		ret = find_watcher(rbd_dev, lockers);
		if (ret) {
			if (ret > 0)
				ret = 0; /* have to request lock */
			goto out;
		}

		rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
			 ENTITY_NAME(lockers[0].id.name));

		ret = ceph_monc_blacklist_add(&client->monc,
					      &lockers[0].info.addr);
		if (ret) {
			rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
				 ENTITY_NAME(lockers[0].id.name), ret);
			goto out;
		}

		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
					  lockers[0].id.cookie,
					  &lockers[0].id.name);
		if (ret && ret != -ENOENT)
			goto out;

again:
		ceph_free_lockers(lockers, num_lockers);
	}

out:
	ceph_free_lockers(lockers, num_lockers);
	return ret;
}
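
/*
 * The loop above implements lock stealing from dead clients: attempt
 * the lock; on -EBUSY look up the holder, and if the holder has no
 * watch established on the header object it is presumed dead, so it
 * is blacklisted and its lock broken before retrying.  A holder that
 * still has a watch is left alone (ret == 0 tells the caller to send
 * RBD_NOTIFY_OP_REQUEST_LOCK instead).
 */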

/*
 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
 */
static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
						int *pret)
{
	enum rbd_lock_state lock_state;

	down_read(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (__rbd_is_lock_owner(rbd_dev)) {
		lock_state = rbd_dev->lock_state;
		up_read(&rbd_dev->lock_rwsem);
		return lock_state;
	}

	up_read(&rbd_dev->lock_rwsem);
	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (!__rbd_is_lock_owner(rbd_dev)) {
		*pret = rbd_try_lock(rbd_dev);
		if (*pret)
			rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
	}

	lock_state = rbd_dev->lock_state;
	up_write(&rbd_dev->lock_rwsem);
	return lock_state;
}

static void rbd_acquire_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, lock_dwork);
	enum rbd_lock_state lock_state;
	int ret = 0;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
	lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
	if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
		if (lock_state == RBD_LOCK_STATE_LOCKED)
			wake_requests(rbd_dev, true);
		dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
		     rbd_dev, lock_state, ret);
		return;
	}

	ret = rbd_request_lock(rbd_dev);
	if (ret == -ETIMEDOUT) {
		goto again; /* treat this as a dead client */
	} else if (ret == -EROFS) {
		rbd_warn(rbd_dev, "peer will not release lock");
		/*
		 * If this is rbd_add_acquire_lock(), we want to fail
		 * immediately -- reuse BLACKLISTED flag.  Otherwise we
		 * want to block.
		 */
		if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
			set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
			/* wake "rbd map --exclusive" process */
			wake_requests(rbd_dev, false);
		}
	} else if (ret < 0) {
		rbd_warn(rbd_dev, "error requesting lock: %d", ret);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
				 RBD_RETRY_DELAY);
	} else {
		/*
		 * lock owner acked, but resend if we don't see them
		 * release the lock
		 */
		dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
		     rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
		    msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
	}
}

/*
 * lock_rwsem must be held for write
 */
static bool rbd_release_lock(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
		return false;

	rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
	downgrade_write(&rbd_dev->lock_rwsem);
	/*
	 * Ensure that all in-flight IO is flushed.
	 *
	 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
	 * may be shared with other devices.
	 */
	ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
	up_read(&rbd_dev->lock_rwsem);

	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
		return false;

	rbd_unlock(rbd_dev);
	/*
	 * Give others a chance to grab the lock - we would re-acquire
	 * almost immediately if we got new IO during ceph_osdc_sync()
	 * otherwise.  We need to ack our own notifications, so this
	 * lock_dwork will be requeued from rbd_wait_state_locked()
	 * after wake_requests() in rbd_handle_released_lock().
	 */
	cancel_delayed_work(&rbd_dev->lock_dwork);
	return true;
}

static void rbd_release_lock_work(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  unlock_work);

	down_write(&rbd_dev->lock_rwsem);
	rbd_release_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}

static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			/*
			 * we already know that the remote client is
			 * the owner
			 */
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	if (!__rbd_is_lock_owner(rbd_dev))
		wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}

static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
			     __func__, rbd_dev, cid.gid, cid.handle,
			     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	if (!__rbd_is_lock_owner(rbd_dev))
		wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}

/*
 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
 * ResponseMessage is needed.
 */
static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
				   void **p)
{
	struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
	struct rbd_client_id cid = { 0 };
	int result = 1;

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (rbd_cid_equal(&cid, &my_cid))
		return result;

	down_read(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev)) {
		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
		    rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
			goto out_unlock;

		/*
		 * encode ResponseMessage(0) so the peer can detect
		 * a missing owner
		 */
		result = 0;

		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
			if (!rbd_dev->opts->exclusive) {
				dout("%s rbd_dev %p queueing unlock_work\n",
				     __func__, rbd_dev);
				queue_work(rbd_dev->task_wq,
					   &rbd_dev->unlock_work);
			} else {
				/* refuse to release the lock */
				result = -EROFS;
			}
		}
	}

out_unlock:
	up_read(&rbd_dev->lock_rwsem);
	return result;
}

static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
				     u64 notify_id, u64 cookie, s32 *result)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
	char buf[buf_size];
	int ret;

	if (result) {
		void *p = buf;

		/* encode ResponseMessage */
		ceph_start_encoding(&p, 1, 1,
				    buf_size - CEPH_ENCODING_START_BLK_LEN);
		ceph_encode_32(&p, *result);
	} else {
		buf_size = 0;
	}

	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
				   &rbd_dev->header_oloc, notify_id, cookie,
				   buf, buf_size);
	if (ret)
		rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
}

static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
				   u64 cookie)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
}

static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
					  u64 notify_id, u64 cookie, s32 result)
{
	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
}

static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
			 u64 notifier_id, void *data, size_t data_len)
{
	struct rbd_device *rbd_dev = arg;
	void *p = data;
	void *const end = p + data_len;
	u8 struct_v = 0;
	u32 len;
	u32 notify_op;
	int ret;

	dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
	     __func__, rbd_dev, cookie, notify_id, data_len);
	if (data_len) {
		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
					  &struct_v, &len);
		if (ret) {
			rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
				 ret);
			return;
		}

		notify_op = ceph_decode_32(&p);
	} else {
		/* legacy notification for header updates */
		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
		len = 0;
	}

	dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
	switch (notify_op) {
	case RBD_NOTIFY_OP_ACQUIRED_LOCK:
		rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_RELEASED_LOCK:
		rbd_handle_released_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_REQUEST_LOCK:
		ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
		if (ret <= 0)
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, ret);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_HEADER_UPDATE:
		ret = rbd_dev_refresh(rbd_dev);
		if (ret)
			rbd_warn(rbd_dev, "refresh failed: %d", ret);

		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	default:
		if (rbd_is_lock_owner(rbd_dev))
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, -EOPNOTSUPP);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	}
}

static void __rbd_unregister_watch(struct rbd_device *rbd_dev);

static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
	struct rbd_device *rbd_dev = arg;

	rbd_warn(rbd_dev, "encountered watch error: %d", err);

	down_write(&rbd_dev->lock_rwsem);
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	up_write(&rbd_dev->lock_rwsem);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
		__rbd_unregister_watch(rbd_dev);
		rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;

		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
	}
	mutex_unlock(&rbd_dev->watch_mutex);
}

/*
 * watch_mutex must be locked
 */
static int __rbd_register_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_linger_request *handle;

	rbd_assert(!rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, rbd_watch_cb,
				 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	rbd_dev->watch_handle = handle;
	return 0;
}

/*
 * watch_mutex must be locked
 */
static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	rbd_assert(rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
	if (ret)
		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);

	rbd_dev->watch_handle = NULL;
}

static int rbd_register_watch(struct rbd_device *rbd_dev)
{
	int ret;

	mutex_lock(&rbd_dev->watch_mutex);
	rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
	ret = __rbd_register_watch(rbd_dev);
	if (ret)
		goto out;

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;

out:
	mutex_unlock(&rbd_dev->watch_mutex);
	return ret;
}

static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
	cancel_work_sync(&rbd_dev->acquired_lock_work);
	cancel_work_sync(&rbd_dev->released_lock_work);
	cancel_delayed_work_sync(&rbd_dev->lock_dwork);
	cancel_work_sync(&rbd_dev->unlock_work);
}

static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
	cancel_tasks_sync(rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
		__rbd_unregister_watch(rbd_dev);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	mutex_unlock(&rbd_dev->watch_mutex);

	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, RBD_LOCK_NAME,
				  CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
				  RBD_LOCK_TAG, cookie);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			rbd_warn(rbd_dev, "failed to update lock cookie: %d",
				 ret);

		/*
		 * Lock cookie cannot be updated on older OSDs, so do
		 * a manual release and queue an acquire.
		 */
		if (rbd_release_lock(rbd_dev))
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->lock_dwork, 0);
	} else {
		__rbd_lock(rbd_dev, cookie);
	}
}

static void rbd_reregister_watch(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, watch_dwork);
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	ret = __rbd_register_watch(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
		if (ret == -EBLACKLISTED || ret == -ENOENT) {
			set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
			wake_requests(rbd_dev, true);
		} else {
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->watch_dwork,
					   RBD_RETRY_DELAY);
		}
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
	mutex_unlock(&rbd_dev->watch_mutex);

	down_write(&rbd_dev->lock_rwsem);
	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		rbd_reacquire_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
}
3550
36be9a76 3551/*
f40eb349
AE
3552 * Synchronous osd object method call. Returns the number of bytes
3553 * returned in the outbound buffer, or a negative error code.
36be9a76
AE
3554 */
3555static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
ecd4a68a
ID
3556 struct ceph_object_id *oid,
3557 struct ceph_object_locator *oloc,
36be9a76 3558 const char *method_name,
4157976b 3559 const void *outbound,
36be9a76 3560 size_t outbound_size,
4157976b 3561 void *inbound,
e2a58ee5 3562 size_t inbound_size)
36be9a76 3563{
ecd4a68a
ID
3564 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3565 struct page *req_page = NULL;
3566 struct page *reply_page;
36be9a76
AE
3567 int ret;
3568
3569 /*
6010a451
AE
3570 * Method calls are ultimately read operations. The result
3571 * should placed into the inbound buffer provided. They
3572 * also supply outbound data--parameters for the object
3573 * method. Currently if this is present it will be a
3574 * snapshot id.
36be9a76 3575 */
ecd4a68a
ID
3576 if (outbound) {
3577 if (outbound_size > PAGE_SIZE)
3578 return -E2BIG;
36be9a76 3579
ecd4a68a
ID
3580 req_page = alloc_page(GFP_KERNEL);
3581 if (!req_page)
3582 return -ENOMEM;
04017e29 3583
ecd4a68a 3584 memcpy(page_address(req_page), outbound, outbound_size);
04017e29 3585 }
36be9a76 3586
ecd4a68a
ID
3587 reply_page = alloc_page(GFP_KERNEL);
3588 if (!reply_page) {
3589 if (req_page)
3590 __free_page(req_page);
3591 return -ENOMEM;
3592 }
57385b51 3593
ecd4a68a
ID
3594 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
3595 CEPH_OSD_FLAG_READ, req_page, outbound_size,
3596 reply_page, &inbound_size);
3597 if (!ret) {
3598 memcpy(inbound, page_address(reply_page), inbound_size);
3599 ret = inbound_size;
3600 }
36be9a76 3601
ecd4a68a
ID
3602 if (req_page)
3603 __free_page(req_page);
3604 __free_page(reply_page);
36be9a76
AE
3605 return ret;
3606}
3607
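/*
 * Example usage (a sketch, mirroring the "get_size" call made for v2
 * images below): the outbound buffer carries the snapshot id, the
 * reply is decoded from the inbound buffer, and a non-negative return
 * value is the reply length in bytes.
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 */
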
/*
 * lock_rwsem must be held for read
 */
static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
{
	DEFINE_WAIT(wait);

	do {
		/*
		 * Note the use of mod_delayed_work() in rbd_acquire_lock()
		 * and cancel_delayed_work() in wake_requests().
		 */
		dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
		prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
					  TASK_UNINTERRUPTIBLE);
		up_read(&rbd_dev->lock_rwsem);
		schedule();
		down_read(&rbd_dev->lock_rwsem);
	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
		 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));

	finish_wait(&rbd_dev->lock_waitq, &wait);
}

static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	bool must_be_locked;
	int result;

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		op_type = OBJ_OP_DISCARD;
		break;
	case REQ_OP_WRITE:
		op_type = OBJ_OP_WRITE;
		break;
	case REQ_OP_READ:
		op_type = OBJ_OP_READ;
		break;
	default:
		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
		result = -EIO;
		goto err;
	}

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	rbd_assert(op_type == OBJ_OP_READ ||
		   rbd_dev->spec->snap_id == CEPH_NOSNAP);

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	must_be_locked =
	    (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
	    (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
	if (must_be_locked) {
		down_read(&rbd_dev->lock_rwsem);
		if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
		    !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
			if (rbd_dev->opts->exclusive) {
				rbd_warn(rbd_dev, "exclusive lock required");
				result = -EROFS;
				goto err_unlock;
			}
			rbd_wait_state_locked(rbd_dev);
		}
		if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
			result = -EBLACKLISTED;
			goto err_unlock;
		}
	}

	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
					     snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_unlock;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else {
		struct ceph_bio_iter bio_it = { .bio = rq->bio,
						.iter = rq->bio->bi_iter };

		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      &bio_it);
	}
	if (result)
		goto err_img_request;

	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;

	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
	return;

err_img_request:
	rbd_img_request_put(img_request);
err_unlock:
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, errno_to_blk_status(result));
}

static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_STS_OK;
}

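/*
 * Each request's blk-mq payload (tag_set.cmd_size is set to
 * sizeof(struct work_struct) in rbd_init_disk() below) holds a work
 * item, initialized once per request in rbd_init_request().  That is
 * why rbd_queue_rq() only has to hand the embedded work to rbd_wq;
 * the worker recovers the request from the work pointer.  A sketch of
 * the round trip:
 *
 *	struct work_struct *work = blk_mq_rq_to_pdu(rq);   // rq -> work
 *	struct request *rq = blk_mq_rq_from_pdu(work);     // work -> rq
 */
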
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	blk_cleanup_queue(rbd_dev->disk->queue);
	blk_mq_free_tag_set(&rbd_dev->tag_set);
	put_disk(rbd_dev->disk);
	rbd_dev->disk = NULL;
}

static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     void *buf, int buf_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages = calc_pages_for(0, buf_len);
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out_req;

	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_req;
	}

	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
					 true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0)
		ceph_copy_from_page_vector(pages, buf, 0, ret);

out_req:
	ceph_osdc_put_request(req);
	return ret;
}

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
					&rbd_dev->header_oloc, ondisk, size);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				 size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

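/*
 * A worked example (assuming the usual 512-byte sectors): a 1 GiB
 * mapping yields
 *
 *	size = 1073741824 / 512 = 2097152 sectors
 *
 * which is what set_capacity() reports to the block layer.
 */
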
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}

static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static const struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.init_request	= rbd_init_request,
};

static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, USHRT_MAX);
	blk_queue_max_segment_size(q, UINT_MAX);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

	/*
	 * disk_release() expects a queue ref from add_disk() and will
	 * put it.  Hold an extra ref until add_disk() is called.
	 */
	WARN_ON(!blk_get_queue(q));
	disk->queue = q;
	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}

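/*
 * A worked example (assuming the common default object order of 22,
 * i.e. 4 MiB objects): segment_size = rbd_obj_bytes() = 4194304, so
 * the queue advertises max_hw_sectors = 4194304 / 512 = 8192 and a
 * 4 MiB minimum/optimal I/O size -- requests line up with whole RADOS
 * objects whenever the block layer can arrange it.
 */
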
/*
  sysfs
*/

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_addr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct ceph_entity_addr *client_addr =
	    ceph_client_addr(rbd_dev->rbd_client->client);

	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
		       le32_to_cpu(client_addr->nonce));
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_gid(rbd_dev->rbd_client->client));
}

static ssize_t rbd_cluster_fsid_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
}

static ssize_t rbd_config_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->config_info);
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

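/*
 * For a single-level clone the parent attribute would read, for
 * example (all values illustrative only):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 1028d6b8b4567
 *	image_name parent-img
 *	snap_id 4
 *	snap_name base
 *	overlap 10737418240
 *
 * with further parent records, if any, separated by an empty line.
 */
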
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_addr.attr,
	&dev_attr_client_id.attr,
	&dev_attr_cluster_fsid.attr,
	&dev_attr_config_info.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static const struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};

static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

static void rbd_dev_free(struct rbd_device *rbd_dev)
{
	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);

	ceph_oid_destroy(&rbd_dev->header_oid);
	ceph_oloc_destroy(&rbd_dev->header_oloc);
	kfree(rbd_dev->config_info);

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}

	rbd_dev_free(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}

static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->header.data_pool_id = CEPH_NOPOOL;
	ceph_oid_init(&rbd_dev->header_oid);
	rbd_dev->header_oloc.pool = spec->pool_id;

	mutex_init(&rbd_dev->watch_mutex);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);

	init_rwsem(&rbd_dev->lock_rwsem);
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
	init_waitqueue_head(&rbd_dev->lock_waitq);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;

	return rbd_dev;
}

/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = __rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		return NULL;

	rbd_dev->opts = opts;

	/* get an id and fill in device name */
	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
					 minor_to_rbd_dev_id(1 << MINORBITS),
					 GFP_KERNEL);
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;

	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
						   rbd_dev->name);
	if (!rbd_dev->task_wq)
		goto fail_dev_id;

	/* we have a ref from do_rbd_add() */
	__module_get(THIS_MODULE);

	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
	return rbd_dev;

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_size",
				  &snapid, sizeof(snapid),
				  &size_buf, sizeof(size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout(" order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_object_prefix",
				  NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_features",
				  &snapid, sizeof(snapid),
				  &features_buf, sizeof(features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_parent",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_stripe_unit_count",
				  NULL, 0, &striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = rbd_obj_bytes(&rbd_dev->header);
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}

static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
{
	__le64 data_pool_id;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_data_pool",
				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
	if (ret < 0)
		return ret;
	if (ret < sizeof(data_pool_id))
		return -EBADMSG;

	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
	return 0;
}

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	CEPH_DEFINE_OID_ONSTACK(oid);
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "dir_get_name", image_id, image_id_size,
				  reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

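/*
 * Illustration (names and ids are hypothetical): with the snap_names
 * block laid out as "one\0two\0three\0" and snaps[] = { 11, 12, 13 },
 * looking up "two" skips strlen("one") + 1 bytes, matches at
 * which == 1 and returns snapc->snaps[1], i.e. 12.
 */
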
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}

/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}

/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}

static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapcontext",
				  NULL, 0, reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout(" snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}

static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapshot_name",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

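/*
 * For example, with *buf pointing at "  rbd foo -", next_token()
 * advances *buf past the leading spaces to "rbd foo -" and returns 3;
 * a subsequent dup_token() call (below) would then copy out "rbd" and
 * leave *buf at " foo -".
 */
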
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_name>
 *      An optional snapshot name.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot name is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
	rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
	rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}

/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	struct ceph_options *opts = rbdc->client->options;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
					    &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_osdc_maybe_request_map(&rbdc->client->osdc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch,
						     opts->mount_timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}

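/*
 * Illustrative scenario for the retry above: if the pool was created
 * after this client last received an osdmap, the initial
 * ceph_pg_poolid_by_name() lookup fails with -ENOENT.  We then ask
 * the monitors for the newest osdmap version and, if ours is older,
 * wait for that epoch and retry once.  -ENOENT is only returned once
 * the lookup has failed against a map known to be new enough.
 */
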
static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
{
	down_write(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev))
		rbd_unlock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}

static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
		return -EINVAL;
	}

	/* FIXME: waiting in "rbd map --exclusive" should be interruptible */
	down_read(&rbd_dev->lock_rwsem);
	rbd_wait_state_locked(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
		rbd_warn(rbd_dev, "failed to acquire exclusive lock");
		return -EROFS;
	}

	return 0;
}

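/*
 * Note (an assumption based on the defaults set in
 * rbd_add_parse_args(), not spelled out in this file): exclusive
 * behaviour is requested per mapping via an "exclusive" token in the
 * <options> list handled by parse_rbd_opts_token(), and per the check
 * above it only works for images created with the exclusive-lock
 * feature.
 */
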
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	CEPH_DEFINE_OID_ONSTACK(oid);
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
			       rbd_dev->spec->image_name);
	if (ret)
		return ret;

	dout("rbd id object name is %s\n", oid.name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "get_id", NULL, 0,
				  response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						       NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	ceph_oid_destroy(&oid);
	return ret;
}

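/*
 * Illustrative example: for a format 2 image named "foo", the lookup
 * above calls the "get_id" class method on an object named
 * RBD_ID_PREFIX + "foo" (presumably "rbd_id.foo", assuming the usual
 * "rbd_id." prefix from rbd_types.h), and the decoded reply yields
 * an image_id such as "10056b8b4567".  A format 1 image has no such
 * object, so the -ENOENT reply is mapped to an empty image_id and
 * image_format 1.
 */
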
/*
 * Undo whatever state changes are made by a v1 or v2 header info
 * call.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
		ret = rbd_dev_v2_data_pool(rbd_dev);
		if (ret)
			goto out_err;
	}

	rbd_init_layout(rbd_dev);
	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}

/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}

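/*
 * Example of the depth accounting above: mapping a clone C whose
 * parent is B, whose parent in turn is A, probes C at depth 0, B at
 * depth 1 and A at depth 2.  A chain deeper than
 * RBD_MAX_PARENT_CHAIN_LEN (16) is refused with -EINVAL rather than
 * recursing without bound.
 */
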
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	rbd_free_disk(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}

/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);

	ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	if (ret)
		goto err_out_mapping;

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);
	return 0;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}

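/*
 * Illustrative note on the single_major case above (the shift is an
 * assumption from RBD_SINGLE_MAJOR_PART_SHIFT being 4):
 * rbd_dev_id_to_minor() presumably maps dev_id to dev_id << 4, so
 * dev_id 2 gets minor 32, leaving minors 33-47 of the shared major
 * for its partitions.
 */
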
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}

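/*
 * Worked example: a format 1 image named "foo" gets header object
 * "foo.rbd" (RBD_SUFFIX appended to the image name), while a format
 * 2 image with id "10056b8b4567" gets header object
 * "rbd_header.10056b8b4567" (assuming the usual ".rbd" and
 * "rbd_header." values from rbd_types.h).
 */
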
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	if (rbd_dev->opts)
		rbd_unregister_watch(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto err_out_format;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
	     rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_unregister_watch(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0) {
		up_write(&rbd_dev->header_rwsem);
		goto err_out_rbd_dev;
	}

	/* If we are mapping a snapshot it must be marked read-only */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		rbd_dev->opts->read_only = true;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	if (rbd_dev->opts->exclusive) {
		rc = rbd_add_acquire_lock(rbd_dev);
		if (rc)
			goto err_out_device_setup;
	}

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	add_disk(rbd_dev->disk);
	/* see rbd_init_disk() */
	blk_put_queue(rbd_dev->disk->queue);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
err_out_device_setup:
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

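/*
 * Illustrative walk-through: for a chain C -> B -> A, the loop above
 * first descends to A (the parent with no grandparent) and destroys
 * it, clearing B's parent pointer; the next iteration destroys B the
 * same way.  C itself is not destroyed here: only its ancestry is
 * torn down, deepest image first.
 */
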
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool already = false;
	bool force = false;
	int ret;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
						   &rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_set_queue_dying(rbd_dev->disk->queue);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}

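/*
 * Example (illustrative only): writing "2" to /sys/bus/rbd/remove
 * unmaps /dev/rbd2, failing with -EBUSY while the device is open;
 * writing "2 force" additionally freezes the queue and fails
 * outstanding IO so the unmap proceeds despite open handles.
 */
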
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");