/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
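
/*
 * Editor's note: a minimal compiled-out sketch (not from the original
 * source) of how the two helpers above behave at their boundaries.  The
 * function name is hypothetical.
 */
#if 0
static void safe_counter_example(void)
{
	atomic_t ref = ATOMIC_INIT(0);

	/*
	 * 0 is "sticky": atomic_fetch_add_unless() refuses to add when the
	 * counter is already 0, so the increment is skipped and 0 comes back.
	 */
	WARN_ON(atomic_inc_return_safe(&ref) != 0);

	/* Decrementing below 0 is refused too: -EINVAL, value restored. */
	WARN_ON(atomic_dec_return_safe(&ref) != -EINVAL);
	WARN_ON(atomic_read(&ref) != 0);
}
#endif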

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
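
/*
 * Editor's note (illustrative, not from the original source): for a
 * hypothetical clone "child" of "parent@snap" in pool "rbd", the child's
 * rbd_dev carries its own rbd_spec, while rbd_dev->parent_spec points at
 * the spec naming (pool "rbd", image "parent", snapshot "snap") -- the
 * very same structure the parent rbd_dev uses for its own identity.
 */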

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};

#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)

enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *            . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
 *            .                 |                                    .
 *            .                 v                                    .
 *            .    RBD_OBJ_WRITE_READ_FROM_PARENT. . .               .
 *            .                 |                    .               .
 *            .                 v                    v (deep-copyup  .
 *  (image    .  RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC   .  not needed)   .
 * flattened) v                 |                    .               .
 *            .                 v                    .               .
 *            . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . .      (copyup  .
 *                              |                        not needed) v
 *                              v                                    .
 *                            done . . . . . . . . . . . . . . . . . .
 *                              ^
 *                              |
 *                     RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};

enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};

struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64			size;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	int			acquire_err;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* rbd_dev_device_setup() ran */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_READONLY,	/* -o ro or snapshot */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(const struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
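
/*
 * Editor's note: a compiled-out sketch (not from the original source) of
 * the single-major minor-number packing above.  With a part shift of 4,
 * each device owns 16 consecutive minors: the first is the whole device,
 * the rest are its partitions.
 */
#if 0
static void minor_packing_example(void)
{
	WARN_ON(rbd_dev_id_to_minor(3) != 48);	/* rbd3 starts at minor 48 */
	WARN_ON(minor_to_rbd_dev_id(48) != 3);	/* whole device */
	WARN_ON(minor_to_rbd_dev_id(63) != 3);	/* partition 15 of rbd3 */
}
#endif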

static bool rbd_is_ro(struct rbd_device *rbd_dev)
{
	return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
}

static bool rbd_is_snap(struct rbd_device *rbd_dev)
{
	return rbd_dev->spec->snap_id != CEPH_NOSNAP;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t supported_features_show(const struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);

/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}
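
/*
 * Editor's note: a compiled-out usage sketch (not from the original
 * source).  A parent that fans out N child requests sets num_pending to N;
 * each completion calls pending_result_dec(), and only the final one
 * reports completion, carrying the first nonzero result seen.
 */
#if 0
static void pending_result_example(struct pending_result *pending)
{
	int result = -EIO;	/* result of the child that just finished */

	if (pending_result_dec(pending, &result)) {
		/* last child done -- 'result' now holds the first error */
	}
}
#endif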

static int rbd_open(struct gendisk *disk, blk_mode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc = NULL, *iter;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(iter, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, iter->client)) {
			__rbd_get_client(iter);

			rbdc = iter;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return rbdc;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	/* int args above */
	Opt_pool_ns,
	Opt_compression_hint,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
};

enum {
	Opt_compression_hint_none,
	Opt_compression_hint_compressible,
	Opt_compression_hint_incompressible,
};

static const struct constant_table rbd_param_compression_hint[] = {
	{"none",		Opt_compression_hint_none},
	{"compressible",	Opt_compression_hint_compressible},
	{"incompressible",	Opt_compression_hint_incompressible},
	{}
};

static const struct fs_parameter_spec rbd_parameters[] = {
	fsparam_u32	("alloc_size",			Opt_alloc_size),
	fsparam_enum	("compression_hint",		Opt_compression_hint,
			 rbd_param_compression_hint),
	fsparam_flag	("exclusive",			Opt_exclusive),
	fsparam_flag	("lock_on_read",		Opt_lock_on_read),
	fsparam_u32	("lock_timeout",		Opt_lock_timeout),
	fsparam_flag	("notrim",			Opt_notrim),
	fsparam_string	("_pool_ns",			Opt_pool_ns),
	fsparam_u32	("queue_depth",			Opt_queue_depth),
	fsparam_flag	("read_only",			Opt_read_only),
	fsparam_flag	("read_write",			Opt_read_write),
	fsparam_flag	("ro",				Opt_read_only),
	fsparam_flag	("rw",				Opt_read_write),
	{}
};
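
/*
 * Editor's note (illustrative, not from the original source): these
 * parameters come from the options field of the string written to
 * /sys/bus/rbd/add, e.g. something like
 *
 *   1.2.3.4:6789 name=admin,queue_depth=128,alloc_size=65536,ro rbd myimage -
 *
 * where the trailing "-" (RBD_SNAP_HEAD_NAME) maps the image head rather
 * than a named snapshot.  See Documentation/ABI/testing/sysfs-bus-rbd for
 * the authoritative format.
 */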

struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;

	u32 alloc_hint_flags;  /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_DEFAULT_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct rbd_parse_opts_ctx {
	struct rbd_spec		*spec;
	struct ceph_options	*copts;
	struct rbd_options	*opts;
};

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock to remove the client from the list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}
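
/*
 * Editor's note (not from the original source): mapping several images
 * against the same cluster with identical options therefore shares one
 * rbd_client (and one ceph_client session); the libceph "noshare" option
 * sets CEPH_OPT_NOSHARE and forces a dedicated client instead.
 */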

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
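
/*
 * Editor's note: a compiled-out sketch (not from the original source) of
 * the layout arithmetic.  An image created with object order 22 gets
 * 4 MiB objects; without the STRIPINGV2 "fancy" parameters the stripe
 * unit collapses to the object size with a stripe count of one.
 */
#if 0
static void layout_example(struct rbd_image_header *header)
{
	header->obj_order = 22;
	WARN_ON(rbd_obj_bytes(header) != 4 * 1024 * 1024);
}
#endif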

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
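
/*
 * Editor's note: a compiled-out sketch (not from the original source)
 * showing why the reversed comparator lets bsearch() work on the
 * highest-first snapshot array.
 */
#if 0
static void snapid_bsearch_example(void)
{
	static u64 snaps[] = { 40, 30, 20, 10 };  /* descending, as kept by the OSDs */
	u64 key = 20;
	u64 *found;

	/*
	 * With the inverted comparator the array looks "ascending" to
	 * bsearch(), so the key is found at index 2.
	 */
	found = bsearch(&key, snaps, ARRAY_SIZE(snaps), sizeof(key),
			snapid_compare_reverse);
	WARN_ON(!found || found - snaps != 2);
}
#endif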

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
			 u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		memzero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		memzero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		BUG();
	}
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}

static void rbd_osd_submit(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
	     __func__, osd_req, obj_req, obj_req->ex.oe_objno,
	     obj_req->ex.oe_off, obj_req->ex.oe_len);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
{
	rbd_assert(obj_req->img_request->snapc);

	if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
		dout("%s %p objno %llu discard\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	if (!obj_req->num_img_extents) {
		dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	if (rbd_obj_is_entire(obj_req) &&
	    !obj_req->img_request->snapc->num_snaps) {
		dout("%s %p objno %llu entire\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	/*
	 * Writes aren't allowed to return a data payload.  In some
	 * guarded write cases (e.g. stat + zero on an empty object)
	 * a stat response makes it through, but we don't care.
	 */
	if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
		result = 0;
	else
		result = osd_req->r_result;

	rbd_obj_handle_request(obj_req, result);
}

static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
	struct ceph_options *opt = rbd_dev->rbd_client->client->options;

	osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}

static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
			  struct ceph_snap_context *snapc, int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
	int ret;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			       rbd_dev->header.object_prefix,
			       obj_req->ex.oe_objno);
	if (ret)
		return ERR_PTR(ret);

	return req;
}
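
/*
 * Editor's note (illustrative, not from the original source): with the
 * format strings above, the data objects of a format 2 image whose
 * object_prefix is e.g. "rbd_data.abc123" are named along the lines of
 * "rbd_data.abc123.0000000000000005" for object number 5; format 1 images
 * use a shorter hex suffix.  The exact formats are defined by
 * RBD_V1_DATA_FORMAT / RBD_V2_DATA_FORMAT in rbd_types.h.
 */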

static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
	rbd_assert(obj_req->img_request->snapc);
	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
					 num_ops);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	INIT_LIST_HEAD(&obj_request->osd_reqs);
	mutex_init(&obj_request->state_mutex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_request *osd_req;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	while (!list_empty(&obj_request->osd_reqs)) {
		osd_req = list_first_entry(&obj_request->osd_reqs,
				    struct ceph_osd_request, r_private_item);
		list_del_init(&osd_req->r_private_item);
		ceph_osdc_put_request(osd_req);
	}

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		BUG();
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

static void rbd_img_request_init(struct rbd_img_request *img_request,
				 struct rbd_device *rbd_dev,
				 enum obj_operation_type op_type)
{
	memset(img_request, 0, sizeof(*img_request));

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;

	INIT_LIST_HEAD(&img_request->lock_item);
	INIT_LIST_HEAD(&img_request->object_extents);
	mutex_init(&img_request->state_mutex);
}

/*
 * Only snap_id is captured here, for reads.  For writes, snapshot
 * context is captured in rbd_img_object_requests() after exclusive
 * lock is ensured to be held.
 */
static void rbd_img_capture_header(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;

	lockdep_assert_held(&rbd_dev->header_rwsem);

	if (!rbd_img_is_write(img_req))
		img_req->snap_id = rbd_dev->spec->snap_id;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_req);
}

static void rbd_img_request_destroy(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);

	WARN_ON(!list_empty(&img_request->lock_item));
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request))
		rbd_dev_parent_put(img_request->rbd_dev);

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	if (test_bit(IMG_REQ_CHILD, &img_request->flags))
		kmem_cache_free(rbd_img_request_cache, img_request);
}

#define BITS_PER_OBJ	2
#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)

static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
				   u64 *index, u8 *shift)
{
	u32 off;

	rbd_assert(objno < rbd_dev->object_map_size);
	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}

static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u64 index;
	u8 shift;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}

static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
	u64 index;
	u8 shift;
	u8 *p;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	rbd_assert(!(val & ~OBJ_MASK));

	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	p = &rbd_dev->object_map[index];
	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}
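
/*
 * Editor's note: a compiled-out sketch (not from the original source) of
 * the 2-bit packing above.  Four object states fit in each byte, highest
 * bits first: object number 5 lives in byte 1, bits 5..4.
 */
#if 0
static void object_map_index_example(struct rbd_device *rbd_dev)
{
	u64 index;
	u8 shift;

	__rbd_object_map_index(rbd_dev, 5, &index, &shift);
	WARN_ON(index != 1 || shift != 4);	/* 5 / 4 == 1, (4-1-1)*2 == 4 */
}
#endif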
22e8bd51 1675static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1217857f 1676{
22e8bd51
ID
1677 u8 state;
1678
1679 spin_lock(&rbd_dev->object_map_lock);
1680 state = __rbd_object_map_get(rbd_dev, objno);
1681 spin_unlock(&rbd_dev->object_map_lock);
1682 return state;
3da691bf 1683}
1217857f 1684
22e8bd51 1685static bool use_object_map(struct rbd_device *rbd_dev)
3da691bf 1686{
3fe69921
ID
1687 /*
1688 * An image mapped read-only can't use the object map -- it isn't
1689 * loaded because the header lock isn't acquired. Someone else can
1690 * write to the image and update the object map behind our back.
1691 *
1692 * A snapshot can't be written to, so using the object map is always
1693 * safe.
1694 */
1695 if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
1696 return false;
1697
22e8bd51
ID
1698 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1699 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
3da691bf
ID
1700}
1701
22e8bd51 1702static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
3da691bf 1703{
22e8bd51 1704 u8 state;
8b3e1a56 1705
22e8bd51
ID
1706 /* fall back to default logic if object map is disabled or invalid */
1707 if (!use_object_map(rbd_dev))
1708 return true;
3da691bf 1709
22e8bd51
ID
1710 state = rbd_object_map_get(rbd_dev, objno);
1711 return state != OBJECT_NONEXISTENT;
1217857f
AE
1712}
1713
22e8bd51
ID
1714static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1715 struct ceph_object_id *oid)
13488d53 1716{
22e8bd51
ID
1717 if (snap_id == CEPH_NOSNAP)
1718 ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
1719 rbd_dev->spec->image_id);
1720 else
1721 ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
1722 rbd_dev->spec->image_id, snap_id);
13488d53
ID
1723}
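
/*
 * Illustrative names produced by rbd_object_map_name(), assuming the
 * usual RBD_OBJECT_MAP_PREFIX of "rbd_object_map." and a hypothetical
 * image id of "abc123":
 *
 *	HEAD:		rbd_object_map.abc123
 *	snap 0x10:	rbd_object_map.abc123.0000000000000010
 */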

static int rbd_object_map_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	u8 lock_type;
	char *lock_tag;
	struct ceph_locker *lockers;
	u32 num_lockers;
	bool broke_lock = false;
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

again:
	ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			    CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
	if (ret != -EBUSY || broke_lock) {
		if (ret == -EEXIST)
			ret = 0; /* already locked by myself */
		if (ret)
			rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
		return ret;
	}

	ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
				 RBD_LOCK_NAME, &lock_type, &lock_tag,
				 &lockers, &num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
		return ret;
	}

	kfree(lock_tag);
	if (num_lockers == 0)
		goto again;

	rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
		 ENTITY_NAME(lockers[0].id.name));

	ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
				  RBD_LOCK_NAME, lockers[0].id.cookie,
				  &lockers[0].id.name);
	ceph_free_lockers(lockers, num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
		return ret;
	}

	broke_lock = true;
	goto again;
}
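
/*
 * Editor's sketch of the retry flow in rbd_object_map_lock():
 *
 *	lock -> -EBUSY -> query lockers -> break lock -> lock again
 *
 * -ENOENT from the info/break calls means the owner or the object went
 * away between steps, so both loop back to the lock attempt.  After one
 * break, a second -EBUSY is returned to the caller rather than fighting
 * over the lock indefinitely.
 */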

static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

	ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			      "");
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
}

static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
{
	u8 struct_v;
	u32 struct_len;
	u32 header_len;
	void *header_end;
	int ret;

	ceph_decode_32_safe(p, end, header_len, e_inval);
	header_end = *p + header_len;

	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, *object_map_size, e_inval);

	*p = header_end;
	return 0;

e_inval:
	return -EINVAL;
}
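
/*
 * Byte layout consumed by decode_object_map_header() (editor's sketch,
 * all integers little-endian):
 *
 *	le32 header_len;	  everything up to the bit vector data
 *	u8   struct_v;		  ceph_start_decoding(), version 1
 *	u8   struct_compat;
 *	le32 struct_len;
 *	le64 object_map_size;	  number of objects in the map
 *
 * *p is advanced to header_end so the caller lands on the data bytes
 * even if the header grows in future versions.
 */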

static int __rbd_object_map_load(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	struct page **pages;
	void *p, *end;
	size_t reply_len;
	u64 num_objects;
	u64 object_map_bytes;
	u64 object_map_size;
	int num_pages;
	int ret;

	rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);

	num_objects = ceph_get_num_objects(&rbd_dev->layout,
					   rbd_dev->mapping.size);
	object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
					    BITS_PER_BYTE);
	num_pages = calc_pages_for(0, object_map_bytes) + 1;
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	reply_len = num_pages * PAGE_SIZE;
	rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
	ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
			     "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
			     NULL, 0, pages, &reply_len);
	if (ret)
		goto out;

	p = page_address(pages[0]);
	end = p + min(reply_len, (size_t)PAGE_SIZE);
	ret = decode_object_map_header(&p, end, &object_map_size);
	if (ret)
		goto out;

	if (object_map_size != num_objects) {
		rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
			 object_map_size, num_objects);
		ret = -EINVAL;
		goto out;
	}

	if (offset_in_page(p) + object_map_bytes > reply_len) {
		ret = -EINVAL;
		goto out;
	}

	rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
	if (!rbd_dev->object_map) {
		ret = -ENOMEM;
		goto out;
	}

	rbd_dev->object_map_size = object_map_size;
	ceph_copy_from_page_vector(pages, rbd_dev->object_map,
				   offset_in_page(p), object_map_bytes);

out:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}

static void rbd_object_map_free(struct rbd_device *rbd_dev)
{
	kvfree(rbd_dev->object_map);
	rbd_dev->object_map = NULL;
	rbd_dev->object_map_size = 0;
}

static int rbd_object_map_load(struct rbd_device *rbd_dev)
{
	int ret;

	ret = __rbd_object_map_load(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_v2_get_flags(rbd_dev);
	if (ret) {
		rbd_object_map_free(rbd_dev);
		return ret;
	}

	if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
		rbd_warn(rbd_dev, "object map is invalid");

	return 0;
}

static int rbd_object_map_open(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_object_map_lock(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_object_map_load(rbd_dev);
	if (ret) {
		rbd_object_map_unlock(rbd_dev);
		return ret;
	}

	return 0;
}

static void rbd_object_map_close(struct rbd_device *rbd_dev)
{
	rbd_object_map_free(rbd_dev);
	rbd_object_map_unlock(rbd_dev);
}

/*
 * This function needs snap_id (or more precisely just something to
 * distinguish between HEAD and snapshot object maps), new_state and
 * current_state that were passed to rbd_object_map_update().
 *
 * To avoid allocating and stashing a context we piggyback on the OSD
 * request.  A HEAD update has two ops (assert_locked).  For new_state
 * and current_state we decode our own object_map_update op, encoded in
 * rbd_cls_object_map_update().
 */
static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
					struct ceph_osd_request *osd_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_data *osd_data;
	u64 objno;
	u8 state, new_state, current_state;
	bool has_current_state;
	void *p;

	if (osd_req->r_result)
		return osd_req->r_result;

	/*
	 * Nothing to do for a snapshot object map.
	 */
	if (osd_req->r_num_ops == 1)
		return 0;

	/*
	 * Update in-memory HEAD object map.
	 */
	rbd_assert(osd_req->r_num_ops == 2);
	osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
	rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);

	p = page_address(osd_data->pages[0]);
	objno = ceph_decode_64(&p);
	rbd_assert(objno == obj_req->ex.oe_objno);
	rbd_assert(ceph_decode_64(&p) == objno + 1);
	new_state = ceph_decode_8(&p);
	has_current_state = ceph_decode_8(&p);
	if (has_current_state)
		current_state = ceph_decode_8(&p);

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	if (!has_current_state || current_state == state ||
	    (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
		__rbd_object_map_set(rbd_dev, objno, new_state);
	spin_unlock(&rbd_dev->object_map_lock);

	return 0;
}

static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	result = rbd_object_map_update_finish(obj_req, osd_req);
	rbd_obj_handle_request(obj_req, result);
}

static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
{
	u8 state = rbd_object_map_get(rbd_dev, objno);

	if (state == new_state ||
	    (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
	    (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
		return false;

	return true;
}
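
/*
 * No-op cases filtered out by update_needed() (editor's summary):
 *
 *	new_state	state			update?
 *	== state	any			no
 *	PENDING		NONEXISTENT		no (nothing to delete)
 *	NONEXISTENT	!= PENDING		no (only objects marked
 *						    PENDING get removed)
 *	otherwise				yes
 */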

static int rbd_cls_object_map_update(struct ceph_osd_request *req,
				     int which, u64 objno, u8 new_state,
				     const u8 *current_state)
{
	struct page **pages;
	void *p, *start;
	int ret;

	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
	if (ret)
		return ret;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	p = start = page_address(pages[0]);
	ceph_encode_64(&p, objno);
	ceph_encode_64(&p, objno + 1);
	ceph_encode_8(&p, new_state);
	if (current_state) {
		ceph_encode_8(&p, 1);
		ceph_encode_8(&p, *current_state);
	} else {
		ceph_encode_8(&p, 0);
	}

	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
					  false, true);
	return 0;
}
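
/*
 * Payload encoded above for the "rbd" class "object_map_update" call
 * (editor's sketch; rbd_object_map_update_finish() decodes this same
 * buffer back):
 *
 *	le64 start_objno;	= objno
 *	le64 end_objno;		= objno + 1 (half-open, single object)
 *	u8   new_state;
 *	u8   has_current_state;	0 or 1
 *	u8   current_state;	present only if has_current_state == 1
 */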

/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
				 u8 new_state, const u8 *current_state)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	int num_ops = 1;
	int which = 0;
	int ret;

	if (snap_id == CEPH_NOSNAP) {
		if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
			return 1;

		num_ops++; /* assert_locked */
	}

	req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_object_map_callback;
	req->r_priv = obj_req;

	rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);

	if (snap_id == CEPH_NOSNAP) {
		/*
		 * Protect against possible race conditions during lock
		 * ownership transitions.
		 */
		ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
					     CEPH_CLS_LOCK_EXCLUSIVE, "", "");
		if (ret)
			return ret;
	}

	ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
					new_state, current_state);
	if (ret)
		return ret;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		return ret;

	ceph_osdc_start_request(osdc, req);
	return 0;
}

static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
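
/*
 * Worked example for prune_extents() (editor's illustration): given
 * extents [0~4M, 6M~2M] and overlap 7M, both extents start below the
 * overlap so both are kept, but the tail extent is trimmed from 6M~2M
 * to 6M~1M so nothing extends past the overlap point.  With overlap 5M
 * the second extent would be dropped entirely.
 */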

/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}

static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		BUG();
	}
}

static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}

static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
				u32 bytes)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int ret;

	ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
	if (ret)
		return ret;

	osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count, bytes);
	return 0;
}

static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
{
	obj_req->read_state = RBD_OBJ_READ_START;
	return 0;
}

static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				      int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	if (!use_object_map(rbd_dev) ||
	    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
		osd_req_op_alloc_hint_init(osd_req, which++,
					   rbd_dev->layout.object_size,
					   rbd_dev->layout.object_size,
					   rbd_dev->opts->alloc_hint_flags);
	}

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, which);
}

static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}

static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}

static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
		osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
	} else {
		osd_req_op_extent_init(osd_req, which,
				       truncate_or_zero_opcode(obj_req),
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
	}
}

static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off, next_off;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
		next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
				      rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;

		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		obj_req->ex.oe_off = off;
		obj_req->ex.oe_len = next_off - off;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
		obj_req->flags |= RBD_OBJ_FLAG_DELETION;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
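
/*
 * Discard alignment example (editor's illustration): with
 * alloc_size = 8M, a 4M~20M object extent is shrunk to
 * round_up(4M) = 8M through round_down(24M) = 24M, i.e. 8M~16M.
 * A 5M~10M discard would compute off = 8M and next_off = 8M and be
 * dropped as too small to free any space (return 1).
 */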

static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
				osd_req_op_init(osd_req, which++,
						CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
			osd_req_op_init(osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else {
		opcode = truncate_or_zero_opcode(obj_req);
	}

	if (opcode)
		osd_req_op_extent_init(osd_req, which, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
}

static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (!obj_req->num_img_extents) {
		obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
		if (rbd_obj_is_entire(obj_req))
			obj_req->flags |= RBD_OBJ_FLAG_DELETION;
	}

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}

static int count_write_ops(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;

	switch (img_req->op_type) {
	case OBJ_OP_WRITE:
		if (!use_object_map(img_req->rbd_dev) ||
		    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
			return 2; /* setallochint + write/writefull */

		return 1; /* write/writefull */
	case OBJ_OP_DISCARD:
		return 1; /* delete/truncate/zero */
	case OBJ_OP_ZEROOUT:
		if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
		    !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
			return 2; /* create + truncate */

		return 1; /* delete/truncate/zero */
	default:
		BUG();
	}
}

static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				    int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->op_type) {
	case OBJ_OP_WRITE:
		__rbd_osd_setup_write_ops(osd_req, which);
		break;
	case OBJ_OP_DISCARD:
		__rbd_osd_setup_discard_ops(osd_req, which);
		break;
	case OBJ_OP_ZEROOUT:
		__rbd_osd_setup_zeroout_ops(osd_req, which);
		break;
	default:
		BUG();
	}
}

/*
 * Prune the list of object requests (adjust offset and/or length, drop
 * redundant requests).  Prepare object request state machines and image
 * request state machine for execution.
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req, *next_obj_req;
	int ret;

	for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_init_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_init_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_init_discard(obj_req);
			break;
		case OBJ_OP_ZEROOUT:
			ret = rbd_obj_init_zeroout(obj_req);
			break;
		default:
			BUG();
		}
		if (ret < 0)
			return ret;
		if (ret > 0) {
			rbd_img_obj_request_del(img_req, obj_req);
			continue;
		}
	}

	img_req->state = RBD_IMG_START;
	return 0;
}

union rbd_img_fill_iter {
	struct ceph_bio_iter bio_iter;
	struct ceph_bvec_iter bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type pos_type;
	union rbd_img_fill_iter *pos;
	union rbd_img_fill_iter iter;
	ceph_object_extent_fn_t set_pos_fn;
	ceph_object_extent_fn_t count_fn;
	ceph_object_extent_fn_t copy_fn;
};

static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}

/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}
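
/*
 * Fancy striping example (editor's illustration): with su = 64K,
 * sc = 2 and os = 4M, a 128K image extent at offset 0 maps 64K into
 * object 0 and 64K into object 1.  A single object request may thus
 * need bio_vecs from non-contiguous chunks of the input, which is why
 * the fancy path below counts and then copies bio_vecs instead of just
 * recording a starting position.
 */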

static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy = {};
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}

static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}

static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}

static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}

static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				     struct ceph_file_extent *img_extents,
				     u32 num_img_extents,
				     struct ceph_bvec_iter *bvec_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BVECS,
		.pos = (union rbd_img_fill_iter *)bvec_pos,
		.set_pos_fn = set_bvec_pos,
		.count_fn = count_bvecs,
		.copy_fn = copy_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct bio_vec *bvecs)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
							     num_img_extents) },
	};

	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
					 &it);
}

static void rbd_img_handle_request_work(struct work_struct *work)
{
	struct rbd_img_request *img_req =
	    container_of(work, struct rbd_img_request, work);

	rbd_img_handle_request(img_req, img_req->work_result);
}

static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
{
	INIT_WORK(&img_req->work, rbd_img_handle_request_work);
	img_req->work_result = result;
	queue_work(rbd_wq, &img_req->work);
}

static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
		return true;
	}

	dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
	     obj_req->ex.oe_objno);
	return false;
}

static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
{
	struct ceph_osd_request *osd_req;
	int ret;

	osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, 0);
	rbd_osd_format_read(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}

static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *parent = img_req->rbd_dev->parent;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!child_img_req)
		return -ENOMEM;

	rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	down_read(&parent->header_rwsem);
	rbd_img_capture_header(child_img_req);
	up_read(&parent->header_rwsem);

	dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
	     obj_req);

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			BUG();
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_destroy(child_img_req);
		return ret;
	}

	/* avoid parent chain recursion */
	rbd_img_schedule(child_img_req, 0);
	return 0;
}

static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

again:
	switch (obj_req->read_state) {
	case RBD_OBJ_READ_START:
		rbd_assert(!*result);

		if (!rbd_obj_may_exist(obj_req)) {
			*result = -ENOENT;
			obj_req->read_state = RBD_OBJ_READ_OBJECT;
			goto again;
		}

		ret = rbd_obj_read_object(obj_req);
		if (ret) {
			*result = ret;
			return true;
		}
		obj_req->read_state = RBD_OBJ_READ_OBJECT;
		return false;
	case RBD_OBJ_READ_OBJECT:
		if (*result == -ENOENT && rbd_dev->parent_overlap) {
			/* reverse map this object extent onto the parent */
			ret = rbd_obj_calc_img_extents(obj_req, false);
			if (ret) {
				*result = ret;
				return true;
			}
			if (obj_req->num_img_extents) {
				ret = rbd_obj_read_from_parent(obj_req);
				if (ret) {
					*result = ret;
					return true;
				}
				obj_req->read_state = RBD_OBJ_READ_PARENT;
				return false;
			}
		}

		/*
		 * -ENOENT means a hole in the image -- zero-fill the entire
		 * length of the request.  A short read also implies zero-fill
		 * to the end of the request.
		 */
		if (*result == -ENOENT) {
			rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
			*result = 0;
		} else if (*result >= 0) {
			if (*result < obj_req->ex.oe_len)
				rbd_obj_zero_range(obj_req, *result,
						   obj_req->ex.oe_len - *result);
			else
				rbd_assert(*result == obj_req->ex.oe_len);
			*result = 0;
		}
		return true;
	case RBD_OBJ_READ_PARENT:
		/*
		 * The parent image is read only up to the overlap -- zero-fill
		 * from the overlap to the end of the request.
		 */
		if (!*result) {
			u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);

			if (obj_overlap < obj_req->ex.oe_len)
				rbd_obj_zero_range(obj_req, obj_overlap,
					    obj_req->ex.oe_len - obj_overlap);
		}
		return true;
	default:
		BUG();
	}
}

static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;

	if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
	    (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
		dout("%s %p noop for nonexistent\n", __func__, obj_req);
		return true;
	}

	return false;
}

/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u8 new_state;

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return 1;

	if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
		new_state = OBJECT_PENDING;
	else
		new_state = OBJECT_EXISTS;

	return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
}

static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
{
	struct ceph_osd_request *osd_req;
	int num_ops = count_write_ops(obj_req);
	int which = 0;
	int ret;

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
		num_ops++; /* stat */

	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
		ret = rbd_osd_setup_stat(osd_req, which++);
		if (ret)
			return ret;
	}

	rbd_osd_setup_write_ops(osd_req, which);
	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}

/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len))
			return false;
	}));
	return true;
}

#define MODS_ONLY	U32_MAX

static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
				      u32 bytes)
{
	struct ceph_osd_request *osd_req;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(bytes > 0 && bytes != MODS_ONLY);

	osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
	if (ret)
		return ret;

	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}

static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
					u32 bytes)
{
	struct ceph_osd_request *osd_req;
	int num_ops = count_write_ops(obj_req);
	int which = 0;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);

	if (bytes != MODS_ONLY)
		num_ops++; /* copyup */

	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	if (bytes != MODS_ONLY) {
		ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
		if (ret)
			return ret;
	}

	rbd_osd_setup_write_ops(osd_req, which);
	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}

static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
		struct page *page = alloc_page(GFP_NOIO);

		if (!page)
			return -ENOMEM;

		bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0);
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}
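
/*
 * Worked example (editor's illustration): for obj_overlap = 10K and
 * 4K pages, calc_pages_for(0, 10K) = 3, so three bvecs are set up
 * covering 4K + 4K + 2K.  The final rbd_assert() verifies that the
 * bvec lengths consumed the whole overlap.
 */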

/*
 * The target object doesn't exist.  Read the data for the entire
 * target object up to the overlap point (if any) from the parent,
 * so we can use it for a copyup.
 */
static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	rbd_assert(obj_req->num_img_extents);
	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	if (!obj_req->num_img_extents) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Re-submit the original write
		 * request -- pass MODS_ONLY since the copyup isn't needed
		 * anymore.
		 */
		return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
	}

	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
	if (ret)
		return ret;

	return rbd_obj_read_from_parent(obj_req);
}

static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_snap_context *snapc = obj_req->img_request->snapc;
	u8 new_state;
	u32 i;
	int ret;

	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return;

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
		return;

	for (i = 0; i < snapc->num_snaps; i++) {
		if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
		    i + 1 < snapc->num_snaps)
			new_state = OBJECT_EXISTS_CLEAN;
		else
			new_state = OBJECT_EXISTS;

		ret = rbd_object_map_update(obj_req, snapc->snaps[i],
					    new_state, NULL);
		if (ret < 0) {
			obj_req->pending.result = ret;
			return;
		}

		rbd_assert(!ret);
		obj_req->pending.num_pending++;
	}
}

static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
{
	u32 bytes = rbd_obj_img_extents_bytes(obj_req);
	int ret;

	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
		bytes = 0;

	if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
		/*
		 * Send a copyup request with an empty snapshot context to
		 * deep-copyup the object through all existing snapshots.
		 * A second request with the current snapshot context will be
		 * sent for the actual modification.
		 */
		ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
		if (ret) {
			obj_req->pending.result = ret;
			return;
		}

		obj_req->pending.num_pending++;
		bytes = MODS_ONLY;
	}

	ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
	if (ret) {
		obj_req->pending.result = ret;
		return;
	}

	obj_req->pending.num_pending++;
}

static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

again:
	switch (obj_req->copyup_state) {
	case RBD_OBJ_COPYUP_START:
		rbd_assert(!*result);

		ret = rbd_obj_copyup_read_parent(obj_req);
		if (ret) {
			*result = ret;
			return true;
		}
		if (obj_req->num_img_extents)
			obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
		else
			obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
		return false;
	case RBD_OBJ_COPYUP_READ_PARENT:
		if (*result)
			return true;

		if (is_zero_bvecs(obj_req->copyup_bvecs,
				  rbd_obj_img_extents_bytes(obj_req))) {
			dout("%s %p detected zeros\n", __func__, obj_req);
			obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
		}

		rbd_obj_copyup_object_maps(obj_req);
		if (!obj_req->pending.num_pending) {
			*result = obj_req->pending.result;
			obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
			goto again;
		}
		obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
		return false;
	case __RBD_OBJ_COPYUP_OBJECT_MAPS:
		if (!pending_result_dec(&obj_req->pending, result))
			return false;
		fallthrough;
	case RBD_OBJ_COPYUP_OBJECT_MAPS:
		if (*result) {
			rbd_warn(rbd_dev, "snap object map update failed: %d",
				 *result);
			return true;
		}

		rbd_obj_copyup_write_object(obj_req);
		if (!obj_req->pending.num_pending) {
			*result = obj_req->pending.result;
			obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
			goto again;
		}
		obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
		return false;
	case __RBD_OBJ_COPYUP_WRITE_OBJECT:
		if (!pending_result_dec(&obj_req->pending, result))
			return false;
		fallthrough;
	case RBD_OBJ_COPYUP_WRITE_OBJECT:
		return true;
	default:
		BUG();
	}
}
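
/*
 * Copyup state machine walked above (editor's sketch):
 *
 *	START --> READ_PARENT --> [__]OBJECT_MAPS --> [__]WRITE_OBJECT
 *
 * The __ prefixed states wait for pending OSD replies, the unprefixed
 * ones act on the collected result.  All-zero parent data is detected
 * after READ_PARENT so the copyup payload can be elided.
 */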

/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u8 current_state = OBJECT_PENDING;

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return 1;

	if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
		return 1;

	return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
				     &current_state);
}

static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

again:
	switch (obj_req->write_state) {
	case RBD_OBJ_WRITE_START:
		rbd_assert(!*result);

		rbd_obj_set_copyup_enabled(obj_req);
		if (rbd_obj_write_is_noop(obj_req))
			return true;

		ret = rbd_obj_write_pre_object_map(obj_req);
		if (ret < 0) {
			*result = ret;
			return true;
		}
		obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
		if (ret > 0)
			goto again;
		return false;
	case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
		if (*result) {
			rbd_warn(rbd_dev, "pre object map update failed: %d",
				 *result);
			return true;
		}
		ret = rbd_obj_write_object(obj_req);
		if (ret) {
			*result = ret;
			return true;
		}
		obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
		return false;
	case RBD_OBJ_WRITE_OBJECT:
		if (*result == -ENOENT) {
			if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
				*result = 0;
				obj_req->copyup_state = RBD_OBJ_COPYUP_START;
				obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
				goto again;
			}
			/*
			 * On a non-existent object:
			 *   delete - -ENOENT, truncate/zero - 0
			 */
			if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
				*result = 0;
		}
		if (*result)
			return true;

		obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
		goto again;
	case __RBD_OBJ_WRITE_COPYUP:
		if (!rbd_obj_advance_copyup(obj_req, result))
			return false;
		fallthrough;
	case RBD_OBJ_WRITE_COPYUP:
		if (*result) {
			rbd_warn(rbd_dev, "copyup failed: %d", *result);
			return true;
		}
		ret = rbd_obj_write_post_object_map(obj_req);
		if (ret < 0) {
			*result = ret;
			return true;
		}
		obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
		if (ret > 0)
			goto again;
		return false;
	case RBD_OBJ_WRITE_POST_OBJECT_MAP:
		if (*result)
			rbd_warn(rbd_dev, "post object map update failed: %d",
				 *result);
		return true;
	default:
		BUG();
	}
}
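
/*
 * Write state machine walked above (editor's sketch):
 *
 *	START --> PRE_OBJECT_MAP --> OBJECT --> [__]COPYUP -->
 *	          POST_OBJECT_MAP --> done
 *
 * The object map updates bracketing the write are skipped (ret > 0)
 * when the feature is disabled or no state change is needed; -ENOENT
 * from the copyup-guarded write diverts into the copyup machinery.
 */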
02c74fba 3377
3da691bf 3378/*
0ad5d953 3379 * Return true if @obj_req is completed.
3da691bf 3380 */
54ab3b24
ID
3381static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3382 int *result)
3da691bf 3383{
0ad5d953 3384 struct rbd_img_request *img_req = obj_req->img_request;
0192ce2e 3385 struct rbd_device *rbd_dev = img_req->rbd_dev;
0ad5d953
ID
3386 bool done;
3387
85b5e6d1 3388 mutex_lock(&obj_req->state_mutex);
0ad5d953 3389 if (!rbd_img_is_write(img_req))
85b5e6d1 3390 done = rbd_obj_advance_read(obj_req, result);
0ad5d953 3391 else
85b5e6d1
ID
3392 done = rbd_obj_advance_write(obj_req, result);
3393 mutex_unlock(&obj_req->state_mutex);
0ad5d953 3394
0192ce2e
ID
3395 if (done && *result) {
3396 rbd_assert(*result < 0);
3397 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3398 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3399 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3400 }
0ad5d953 3401 return done;
3da691bf 3402}
02c74fba 3403
0192ce2e
ID
3404/*
3405 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3406 * recursion.
3407 */
3408static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3409{
3410 if (__rbd_obj_handle_request(obj_req, &result))
3411 rbd_img_handle_request(obj_req->img_request, result);
3412}
3413
e1fddc8f
ID
3414static bool need_exclusive_lock(struct rbd_img_request *img_req)
3415{
3416 struct rbd_device *rbd_dev = img_req->rbd_dev;
3417
3418 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3419 return false;
3420
3fe69921 3421 if (rbd_is_ro(rbd_dev))
e1fddc8f
ID
3422 return false;
3423
3424 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
22e8bd51
ID
3425 if (rbd_dev->opts->lock_on_read ||
3426 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
e1fddc8f
ID
3427 return true;
3428
3429 return rbd_img_is_write(img_req);
3430}
3431
637cd060 3432static bool rbd_lock_add_request(struct rbd_img_request *img_req)
e1fddc8f
ID
3433{
3434 struct rbd_device *rbd_dev = img_req->rbd_dev;
637cd060 3435 bool locked;
e1fddc8f
ID
3436
3437 lockdep_assert_held(&rbd_dev->lock_rwsem);
637cd060 3438 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
e1fddc8f
ID
3439 spin_lock(&rbd_dev->lock_lists_lock);
3440 rbd_assert(list_empty(&img_req->lock_item));
637cd060
ID
3441 if (!locked)
3442 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3443 else
3444 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
e1fddc8f 3445 spin_unlock(&rbd_dev->lock_lists_lock);
637cd060 3446 return locked;
e1fddc8f
ID
3447}
3448
3449static void rbd_lock_del_request(struct rbd_img_request *img_req)
3450{
3451 struct rbd_device *rbd_dev = img_req->rbd_dev;
3452 bool need_wakeup;
3453
3454 lockdep_assert_held(&rbd_dev->lock_rwsem);
3455 spin_lock(&rbd_dev->lock_lists_lock);
3456 rbd_assert(!list_empty(&img_req->lock_item));
3457 list_del_init(&img_req->lock_item);
3458 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3459 list_empty(&rbd_dev->running_list));
3460 spin_unlock(&rbd_dev->lock_lists_lock);
3461 if (need_wakeup)
3462 complete(&rbd_dev->releasing_wait);
3463}
3464
3465static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3466{
3467 struct rbd_device *rbd_dev = img_req->rbd_dev;
3468
3469 if (!need_exclusive_lock(img_req))
3470 return 1;
3471
3472 if (rbd_lock_add_request(img_req))
3473 return 1;
3474
3475 if (rbd_dev->opts->exclusive) {
3476 WARN_ON(1); /* lock got released? */
3477 return -EROFS;
3478 }
3479
3480 /*
3481 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3482 * and cancel_delayed_work() in wake_lock_waiters().
3483 */
3484 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3485 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3486 return 0;
3487}
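/*
 * Summary of the convention above, mirrored by rbd_img_advance():
 * 1 - proceed immediately (no lock needed, or it is already held),
 * 0 - the request was parked on acquiring_list and lock_dwork queued,
 * <0 - error (e.g. -EROFS when an "exclusive" mapping lost the lock).
 */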
3488
0192ce2e 3489static void rbd_img_object_requests(struct rbd_img_request *img_req)
7114edac 3490{
870611e4 3491 struct rbd_device *rbd_dev = img_req->rbd_dev;
0192ce2e 3492 struct rbd_obj_request *obj_req;
7114edac 3493
0192ce2e 3494 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3495 rbd_assert(!need_exclusive_lock(img_req) ||
3496 __rbd_is_lock_owner(rbd_dev));
3497
3498 if (rbd_img_is_write(img_req)) {
3499 rbd_assert(!img_req->snapc);
3500 down_read(&rbd_dev->header_rwsem);
3501 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
3502 up_read(&rbd_dev->header_rwsem);
3503 }
3504
3505 for_each_obj_request(img_req, obj_req) {
3506 int result = 0;
a9e8ba2c 3507
3508 if (__rbd_obj_handle_request(obj_req, &result)) {
3509 if (result) {
3510 img_req->pending.result = result;
3511 return;
3512 }
3513 } else {
3514 img_req->pending.num_pending++;
3515 }
3516 }
3517}
3518
0192ce2e 3519static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
8b3e1a56 3520{
3da691bf 3521 int ret;
8b3e1a56 3522
3523again:
3524 switch (img_req->state) {
3525 case RBD_IMG_START:
3526 rbd_assert(!*result);
8b3e1a56 3527
3528 ret = rbd_img_exclusive_lock(img_req);
3529 if (ret < 0) {
3530 *result = ret;
3531 return true;
3532 }
3533 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3534 if (ret > 0)
3535 goto again;
3da691bf 3536 return false;
3537 case RBD_IMG_EXCLUSIVE_LOCK:
3538 if (*result)
3539 return true;
3540
3541 rbd_img_object_requests(img_req);
3542 if (!img_req->pending.num_pending) {
3543 *result = img_req->pending.result;
3544 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3545 goto again;
3da691bf 3546 }
0192ce2e 3547 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3da691bf 3548 return false;
3549 case __RBD_IMG_OBJECT_REQUESTS:
3550 if (!pending_result_dec(&img_req->pending, result))
3551 return false;
df561f66 3552 fallthrough;
3553 case RBD_IMG_OBJECT_REQUESTS:
3554 return true;
3da691bf 3555 default:
c6244b3b 3556 BUG();
3557 }
3558}
02c74fba 3559
3da691bf 3560/*
0192ce2e 3561 * Return true if @img_req is completed.
3da691bf 3562 */
3563static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3564 int *result)
7114edac 3565{
3566 struct rbd_device *rbd_dev = img_req->rbd_dev;
3567 bool done;
7114edac 3568
3569 if (need_exclusive_lock(img_req)) {
3570 down_read(&rbd_dev->lock_rwsem);
3571 mutex_lock(&img_req->state_mutex);
3572 done = rbd_img_advance(img_req, result);
3573 if (done)
3574 rbd_lock_del_request(img_req);
3575 mutex_unlock(&img_req->state_mutex);
3576 up_read(&rbd_dev->lock_rwsem);
3577 } else {
3578 mutex_lock(&img_req->state_mutex);
3579 done = rbd_img_advance(img_req, result);
3580 mutex_unlock(&img_req->state_mutex);
02c74fba 3581 }
a9e8ba2c 3582
3583 if (done && *result) {
3584 rbd_assert(*result < 0);
3585 rbd_warn(rbd_dev, "%s%s result %d",
3586 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3587 obj_op_name(img_req->op_type), *result);
7114edac 3588 }
0192ce2e 3589 return done;
7114edac 3590}
a9e8ba2c 3591
0192ce2e 3592static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3da691bf 3593{
7114edac 3594again:
0192ce2e 3595 if (!__rbd_img_handle_request(img_req, &result))
7114edac 3596 return;
8b3e1a56 3597
7114edac 3598 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3599 struct rbd_obj_request *obj_req = img_req->obj_request;
3600
679a97d2 3601 rbd_img_request_destroy(img_req);
3602 if (__rbd_obj_handle_request(obj_req, &result)) {
3603 img_req = obj_req->img_request;
3604 goto again;
3605 }
3606 } else {
59e542c8 3607 struct request *rq = blk_mq_rq_from_pdu(img_req);
0192ce2e 3608
679a97d2 3609 rbd_img_request_destroy(img_req);
0192ce2e 3610 blk_mq_end_request(rq, errno_to_blk_status(result));
7114edac 3611 }
8b3e1a56 3612}
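/*
 * Example of the unwinding above for a one-level clone: a child image
 * request issued for copyup completes, IMG_REQ_CHILD routes its result
 * into the parent's object request state machine, and if that in turn
 * finishes the parent image request the loop comes around instead of
 * recursing - keeping stack depth flat on long parent chains.
 */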
bf0d5f50 3613
ed95b21a 3614static const struct rbd_client_id rbd_empty_cid;
b8d70035 3615
3616static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3617 const struct rbd_client_id *rhs)
3618{
3619 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3620}
3621
3622static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3623{
3624 struct rbd_client_id cid;
3625
3626 mutex_lock(&rbd_dev->watch_mutex);
3627 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3628 cid.handle = rbd_dev->watch_cookie;
3629 mutex_unlock(&rbd_dev->watch_mutex);
3630 return cid;
3631}
3632
3633/*
3634 * lock_rwsem must be held for write
3635 */
3636static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3637 const struct rbd_client_id *cid)
3638{
3639 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3640 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3641 cid->gid, cid->handle);
3642 rbd_dev->owner_cid = *cid; /* struct */
3643}
3644
3645static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3646{
3647 mutex_lock(&rbd_dev->watch_mutex);
3648 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3649 mutex_unlock(&rbd_dev->watch_mutex);
3650}
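/*
 * Illustrative user-space round-trip of the cookie built above and
 * parsed back by rbd_try_lock()/find_watcher() via sscanf().  The
 * prefix value below is an assumption, not the kernel's definition of
 * RBD_LOCK_COOKIE_PREFIX; only the "<prefix> <watch_cookie>" shape
 * matters.
 */
#include <stdio.h>

#define EXAMPLE_COOKIE_PREFIX "auto"	/* assumed stand-in */

int main(void)
{
	char cookie[32];
	unsigned long long handle = 0;

	/* what format_lock_cookie() does */
	snprintf(cookie, sizeof(cookie), "%s %llu", EXAMPLE_COOKIE_PREFIX,
		 140237ULL);
	/* what get_lock_owner_info()/find_watcher() undo */
	if (sscanf(cookie, EXAMPLE_COOKIE_PREFIX " %llu", &handle) == 1)
		printf("parsed handle %llu from \"%s\"\n", handle, cookie);
	return 0;
}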
3651
3652static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3653{
3654 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3655
a2b1da09 3656 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3657 strcpy(rbd_dev->lock_cookie, cookie);
3658 rbd_set_owner_cid(rbd_dev, &cid);
3659 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3660}
3661
3662/*
3663 * lock_rwsem must be held for write
3664 */
3665static int rbd_lock(struct rbd_device *rbd_dev)
b8d70035 3666{
922dab61 3667 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
ed95b21a 3668 char cookie[32];
e627db08 3669 int ret;
b8d70035 3670
3671 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3672 rbd_dev->lock_cookie[0] != '\0');
52bb1f9b 3673
3674 format_lock_cookie(rbd_dev, cookie);
3675 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3676 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3677 RBD_LOCK_TAG, "", 0);
9d01e07f 3678 if (ret && ret != -EEXIST)
ed95b21a 3679 return ret;
b8d70035 3680
edd8ca80 3681 __rbd_lock(rbd_dev, cookie);
ed95b21a 3682 return 0;
3683}
3684
3685/*
3686 * lock_rwsem must be held for write
3687 */
bbead745 3688static void rbd_unlock(struct rbd_device *rbd_dev)
bb040aa0 3689{
922dab61 3690 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3691 int ret;
3692
3693 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3694 rbd_dev->lock_cookie[0] == '\0');
bb040aa0 3695
ed95b21a 3696 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
cbbfb0ff 3697 RBD_LOCK_NAME, rbd_dev->lock_cookie);
bbead745 3698 if (ret && ret != -ENOENT)
637cd060 3699 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
bb040aa0 3700
3701 /* treat errors as the image is unlocked */
3702 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
cbbfb0ff 3703 rbd_dev->lock_cookie[0] = '\0';
3704 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3705 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3706}
3707
3708static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3709 enum rbd_notify_op notify_op,
3710 struct page ***preply_pages,
3711 size_t *preply_len)
3712{
3713 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
ed95b21a 3714 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3715 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3716 int buf_size = sizeof(buf);
ed95b21a 3717 void *p = buf;
9969ebc5 3718
ed95b21a 3719 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
9969ebc5 3720
3721 /* encode *LockPayload NotifyMessage (op + ClientId) */
3722 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3723 ceph_encode_32(&p, notify_op);
3724 ceph_encode_64(&p, cid.gid);
3725 ceph_encode_64(&p, cid.handle);
8eb87565 3726
3727 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3728 &rbd_dev->header_oloc, buf, buf_size,
3729 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3730}
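/*
 * User-space model of the NotifyMessage laid out above (a sketch, not
 * kernel code).  It assumes ceph_start_encoding() emits a 6-byte header
 * -- u8 struct_v, u8 struct_compat, le32 payload length -- which is
 * what CEPH_ENCODING_START_BLK_LEN accounts for.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t *put_le32(uint8_t *p, uint32_t v)
{
	int i;

	for (i = 0; i < 4; i++)
		*p++ = v >> (8 * i);
	return p;
}

static uint8_t *put_le64(uint8_t *p, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		*p++ = v >> (8 * i);
	return p;
}

int main(void)
{
	uint8_t buf[6 + 4 + 8 + 8];	/* header + notify_op + gid + handle */
	uint8_t *p = buf;

	*p++ = 2;				/* struct_v */
	*p++ = 1;				/* struct_compat */
	p = put_le32(p, sizeof(buf) - 6);	/* payload length */
	p = put_le32(p, 1);			/* notify_op (hypothetical value) */
	p = put_le64(p, 4100);			/* cid.gid (example) */
	p = put_le64(p, 94589086124064);	/* cid.handle (example) */

	printf("encoded %zu bytes\n", (size_t)(p - buf));
	return 0;
}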
3731
3732static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3733 enum rbd_notify_op notify_op)
b30a01f2 3734{
8ae0299a 3735 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
ed95b21a 3736}
b30a01f2 3737
3738static void rbd_notify_acquired_lock(struct work_struct *work)
3739{
3740 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3741 acquired_lock_work);
76756a51 3742
ed95b21a 3743 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3744}
3745
ed95b21a 3746static void rbd_notify_released_lock(struct work_struct *work)
c525f036 3747{
3748 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3749 released_lock_work);
811c6688 3750
ed95b21a 3751 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3752}
3753
ed95b21a 3754static int rbd_request_lock(struct rbd_device *rbd_dev)
36be9a76 3755{
3756 struct page **reply_pages;
3757 size_t reply_len;
3758 bool lock_owner_responded = false;
3759 int ret;
3760
ed95b21a 3761 dout("%s rbd_dev %p\n", __func__, rbd_dev);
36be9a76 3762
3763 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3764 &reply_pages, &reply_len);
3765 if (ret && ret != -ETIMEDOUT) {
3766 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
36be9a76 3767 goto out;
ed95b21a 3768 }
36be9a76 3769
3770 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3771 void *p = page_address(reply_pages[0]);
3772 void *const end = p + reply_len;
3773 u32 n;
36be9a76 3774
3775 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3776 while (n--) {
3777 u8 struct_v;
3778 u32 len;
36be9a76 3779
3780 ceph_decode_need(&p, end, 8 + 8, e_inval);
3781 p += 8 + 8; /* skip gid and cookie */
04017e29 3782
3783 ceph_decode_32_safe(&p, end, len, e_inval);
3784 if (!len)
3785 continue;
3786
3787 if (lock_owner_responded) {
3788 rbd_warn(rbd_dev,
3789 "duplicate lock owners detected");
3790 ret = -EIO;
3791 goto out;
3792 }
3793
3794 lock_owner_responded = true;
3795 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3796 &struct_v, &len);
3797 if (ret) {
3798 rbd_warn(rbd_dev,
3799 "failed to decode ResponseMessage: %d",
3800 ret);
3801 goto e_inval;
3802 }
3803
3804 ret = ceph_decode_32(&p);
3805 }
3806 }
3807
3808 if (!lock_owner_responded) {
3809 rbd_warn(rbd_dev, "no lock owners detected");
3810 ret = -ETIMEDOUT;
3811 }
3812
3813out:
3814 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3815 return ret;
3816
3817e_inval:
3818 ret = -EINVAL;
3819 goto out;
3820}
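/*
 * Shape of the notify reply decoded above, as implied by the decode
 * calls: le32 num_acks, then per acker { le64 gid, le64 cookie,
 * le32 payload_len, payload bytes }.  Only the lock owner sends a
 * non-empty payload - a ResponseMessage whose le32 result is 0 (owner
 * will release) or -EROFS (owner refuses, see rbd_handle_request_lock()
 * below) - which is why more than one responder is reported as an error.
 */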
3821
3822/*
3823 * Either image request state machine(s) or rbd_add_acquire_lock()
3824 * (i.e. "rbd map").
3825 */
3826static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
ed95b21a 3827{
3828 struct rbd_img_request *img_req;
3829
3830 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
d9b9c893 3831 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3832
3833 cancel_delayed_work(&rbd_dev->lock_dwork);
3834 if (!completion_done(&rbd_dev->acquire_wait)) {
3835 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3836 list_empty(&rbd_dev->running_list));
3837 rbd_dev->acquire_err = result;
3838 complete_all(&rbd_dev->acquire_wait);
3839 return;
3840 }
3841
3842 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3843 mutex_lock(&img_req->state_mutex);
3844 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3845 rbd_img_schedule(img_req, result);
3846 mutex_unlock(&img_req->state_mutex);
3847 }
3848
3849 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
3850}
3851
3852static bool locker_equal(const struct ceph_locker *lhs,
3853 const struct ceph_locker *rhs)
3854{
3855 return lhs->id.name.type == rhs->id.name.type &&
3856 lhs->id.name.num == rhs->id.name.num &&
3857 !strcmp(lhs->id.cookie, rhs->id.cookie) &&
3858 ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
3859}
3860
3861static void free_locker(struct ceph_locker *locker)
3862{
3863 if (locker)
3864 ceph_free_lockers(locker, 1);
3865}
3866
3867static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
3868{
3869 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3870 struct ceph_locker *lockers;
3871 u32 num_lockers;
3872 u8 lock_type;
3873 char *lock_tag;
8ff2c64c 3874 u64 handle;
3875 int ret;
3876
3877 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3878 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3879 &lock_type, &lock_tag, &lockers, &num_lockers);
3880 if (ret) {
9d01e07f 3881 rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
3882 return ERR_PTR(ret);
3883 }
ed95b21a 3884
f38cb9d9 3885 if (num_lockers == 0) {
ed95b21a 3886 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
f38cb9d9 3887 lockers = NULL;
3888 goto out;
3889 }
3890
3891 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3892 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3893 lock_tag);
f38cb9d9 3894 goto err_busy;
3895 }
3896
3897 if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) {
3898 rbd_warn(rbd_dev, "incompatible lock type detected");
f38cb9d9 3899 goto err_busy;
3900 }
3901
f38cb9d9 3902 WARN_ON(num_lockers != 1);
3903 ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu",
3904 &handle);
3905 if (ret != 1) {
ed95b21a 3906 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3907 lockers[0].id.cookie);
3908 goto err_busy;
ed95b21a 3909 }
3910 if (ceph_addr_is_blank(&lockers[0].info.addr)) {
3911 rbd_warn(rbd_dev, "locker has a blank address");
3912 goto err_busy;
3913 }
3914
3915 dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n",
3916 __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name),
3917 &lockers[0].info.addr.in_addr,
3918 le32_to_cpu(lockers[0].info.addr.nonce), handle);
3919
3920out:
3921 kfree(lock_tag);
3922 return lockers;
3923
3924err_busy:
3925 kfree(lock_tag);
3926 ceph_free_lockers(lockers, num_lockers);
3927 return ERR_PTR(-EBUSY);
3928}
3929
3930static int find_watcher(struct rbd_device *rbd_dev,
3931 const struct ceph_locker *locker)
3932{
3933 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3934 struct ceph_watch_item *watchers;
3935 u32 num_watchers;
3936 u64 cookie;
3937 int i;
3938 int ret;
3939
3940 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3941 &rbd_dev->header_oloc, &watchers,
3942 &num_watchers);
3943 if (ret) {
3944 rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
ed95b21a 3945 return ret;
9d01e07f 3946 }
3947
3948 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3949 for (i = 0; i < num_watchers; i++) {
3950 /*
3951 * Ignore addr->type while comparing. This mimics
3952 * entity_addr_t::get_legacy_str() + strcmp().
3953 */
3954 if (ceph_addr_equal_no_type(&watchers[i].addr,
3955 &locker->info.addr) &&
3956 watchers[i].cookie == cookie) {
3957 struct rbd_client_id cid = {
3958 .gid = le64_to_cpu(watchers[i].name.num),
3959 .handle = cookie,
3960 };
3961
3962 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3963 rbd_dev, cid.gid, cid.handle);
3964 rbd_set_owner_cid(rbd_dev, &cid);
3965 ret = 1;
3966 goto out;
3967 }
3968 }
3969
3970 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3971 ret = 0;
3972out:
3973 kfree(watchers);
3974 return ret;
3975}
3976
3977/*
3978 * lock_rwsem must be held for write
3979 */
3980static int rbd_try_lock(struct rbd_device *rbd_dev)
3981{
3982 struct ceph_client *client = rbd_dev->rbd_client->client;
58815900 3983 struct ceph_locker *locker, *refreshed_locker;
3984 int ret;
3985
3986 for (;;) {
58815900 3987 locker = refreshed_locker = NULL;
f38cb9d9 3988
ed95b21a 3989 ret = rbd_lock(rbd_dev);
3990 if (!ret)
3991 goto out;
3992 if (ret != -EBUSY) {
3993 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
f38cb9d9 3994 goto out;
9d01e07f 3995 }
3996
3997 /* determine if the current lock holder is still alive */
3998 locker = get_lock_owner_info(rbd_dev);
3999 if (IS_ERR(locker)) {
4000 ret = PTR_ERR(locker);
4001 locker = NULL;
4002 goto out;
4003 }
4004 if (!locker)
4005 goto again;
4006
f38cb9d9 4007 ret = find_watcher(rbd_dev, locker);
4008 if (ret)
4009 goto out; /* request lock or error */
ed95b21a 4010
4011 refreshed_locker = get_lock_owner_info(rbd_dev);
4012 if (IS_ERR(refreshed_locker)) {
4013 ret = PTR_ERR(refreshed_locker);
4014 refreshed_locker = NULL;
4015 goto out;
4016 }
4017 if (!refreshed_locker ||
4018 !locker_equal(locker, refreshed_locker))
4019 goto again;
4020
22e8bd51 4021 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
f38cb9d9 4022 ENTITY_NAME(locker->id.name));
ed95b21a 4023
0b98acd6 4024 ret = ceph_monc_blocklist_add(&client->monc,
f38cb9d9 4025 &locker->info.addr);
ed95b21a 4026 if (ret) {
4027 rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
4028 ENTITY_NAME(locker->id.name), ret);
4029 goto out;
4030 }
4031
4032 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4033 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4034 locker->id.cookie, &locker->id.name);
4035 if (ret && ret != -ENOENT) {
4036 rbd_warn(rbd_dev, "failed to break header lock: %d",
4037 ret);
ed95b21a 4038 goto out;
f38cb9d9 4039 }
4040
4041again:
58815900 4042 free_locker(refreshed_locker);
f38cb9d9 4043 free_locker(locker);
4044 }
4045
4046out:
58815900 4047 free_locker(refreshed_locker);
f38cb9d9 4048 free_locker(locker);
4049 return ret;
4050}
4051
4052static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4053{
4054 int ret;
4055
4056 ret = rbd_dev_refresh(rbd_dev);
4057 if (ret)
4058 return ret;
4059
4060 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4061 ret = rbd_object_map_open(rbd_dev);
4062 if (ret)
4063 return ret;
4064 }
4065
4066 return 0;
4067}
4068
ed95b21a 4069/*
4070 * Return:
4071 * 0 - lock acquired
4072 * 1 - caller should call rbd_request_lock()
4073 * <0 - error
ed95b21a 4074 */
637cd060 4075static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
ed95b21a 4076{
637cd060 4077 int ret;
4078
4079 down_read(&rbd_dev->lock_rwsem);
4080 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4081 rbd_dev->lock_state);
4082 if (__rbd_is_lock_owner(rbd_dev)) {
ed95b21a 4083 up_read(&rbd_dev->lock_rwsem);
637cd060 4084 return 0;
4085 }
4086
4087 up_read(&rbd_dev->lock_rwsem);
4088 down_write(&rbd_dev->lock_rwsem);
4089 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4090 rbd_dev->lock_state);
4091 if (__rbd_is_lock_owner(rbd_dev)) {
4092 up_write(&rbd_dev->lock_rwsem);
4093 return 0;
4094 }
4095
4096 ret = rbd_try_lock(rbd_dev);
4097 if (ret < 0) {
4098 rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
4099 goto out;
4100 }
4101 if (ret > 0) {
4102 up_write(&rbd_dev->lock_rwsem);
4103 return ret;
4104 }
4105
4106 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4107 rbd_assert(list_empty(&rbd_dev->running_list));
4108
4109 ret = rbd_post_acquire_action(rbd_dev);
4110 if (ret) {
4111 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4112 /*
4113 * Can't stay in RBD_LOCK_STATE_LOCKED because
4114 * rbd_lock_add_request() would let the request through,
4115 * assuming that e.g. object map is locked and loaded.
4116 */
4117 rbd_unlock(rbd_dev);
4118 }
4119
4120out:
4121 wake_lock_waiters(rbd_dev, ret);
ed95b21a 4122 up_write(&rbd_dev->lock_rwsem);
637cd060 4123 return ret;
4124}
4125
4126static void rbd_acquire_lock(struct work_struct *work)
4127{
4128 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4129 struct rbd_device, lock_dwork);
637cd060 4130 int ret;
4131
4132 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4133again:
4134 ret = rbd_try_acquire_lock(rbd_dev);
4135 if (ret <= 0) {
4136 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4137 return;
4138 }
4139
4140 ret = rbd_request_lock(rbd_dev);
4141 if (ret == -ETIMEDOUT) {
4142 goto again; /* treat this as a dead client */
4143 } else if (ret == -EROFS) {
4144 rbd_warn(rbd_dev, "peer will not release lock");
4145 down_write(&rbd_dev->lock_rwsem);
4146 wake_lock_waiters(rbd_dev, ret);
4147 up_write(&rbd_dev->lock_rwsem);
4148 } else if (ret < 0) {
4149 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4150 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4151 RBD_RETRY_DELAY);
4152 } else {
4153 /*
4154 * lock owner acked, but resend if we don't see them
4155 * release the lock
4156 */
6b0a8774 4157 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4158 rbd_dev);
4159 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4160 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4161 }
4162}
4163
a2b1da09 4164static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
ed95b21a 4165{
a2b1da09 4166 dout("%s rbd_dev %p\n", __func__, rbd_dev);
d9b9c893 4167 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
a2b1da09 4168
4169 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4170 return false;
4171
52bb1f9b 4172 /*
ed95b21a 4173 * Ensure that all in-flight IO is flushed.
52bb1f9b 4174 */
4175 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4176 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4177 if (list_empty(&rbd_dev->running_list))
4178 return true;
4179
4180 up_write(&rbd_dev->lock_rwsem);
4181 wait_for_completion(&rbd_dev->releasing_wait);
4182
4183 down_write(&rbd_dev->lock_rwsem);
4184 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4185 return false;
4186
e1fddc8f 4187 rbd_assert(list_empty(&rbd_dev->running_list));
4188 return true;
4189}
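/*
 * Minimal user-space model of the drain handshake above (a pthread
 * sketch, not kernel code): del_request() plays rbd_lock_del_request(),
 * main() plays rbd_quiesce_lock().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t releasing_wait = PTHREAD_COND_INITIALIZER;	/* the completion */
static int releasing, running = 3;	/* three requests on running_list */

static void *del_request(void *arg)
{
	pthread_mutex_lock(&lk);
	if (--running == 0 && releasing)
		pthread_cond_signal(&releasing_wait);	/* complete() */
	pthread_mutex_unlock(&lk);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	int i;

	pthread_mutex_lock(&lk);
	releasing = 1;			/* RBD_LOCK_STATE_RELEASING */
	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, del_request, NULL);
	while (running)			/* wait_for_completion() */
		pthread_cond_wait(&releasing_wait, &lk);
	pthread_mutex_unlock(&lk);

	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	printf("running list drained - safe to release the lock\n");
	return 0;
}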
4190
4191static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4192{
4193 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4194 rbd_object_map_close(rbd_dev);
4195}
4196
4197static void __rbd_release_lock(struct rbd_device *rbd_dev)
4198{
4199 rbd_assert(list_empty(&rbd_dev->running_list));
4200
22e8bd51 4201 rbd_pre_release_action(rbd_dev);
bbead745 4202 rbd_unlock(rbd_dev);
4203}
4204
4205/*
4206 * lock_rwsem must be held for write
4207 */
4208static void rbd_release_lock(struct rbd_device *rbd_dev)
4209{
4210 if (!rbd_quiesce_lock(rbd_dev))
4211 return;
4212
e1fddc8f 4213 __rbd_release_lock(rbd_dev);
a2b1da09 4214
 4215 /*
 4216 * Give others a chance to grab the lock - otherwise we would
 4217 * re-acquire it almost immediately if new IO arrived while we
 4218 * were draining the running list. We need to ack our own
 4219 * notifications, so this lock_dwork will be requeued from
 4220 * rbd_handle_released_lock() by way of maybe_kick_acquire().
 4221 */
4222 cancel_delayed_work(&rbd_dev->lock_dwork);
4223}
4224
4225static void rbd_release_lock_work(struct work_struct *work)
4226{
4227 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4228 unlock_work);
4229
4230 down_write(&rbd_dev->lock_rwsem);
4231 rbd_release_lock(rbd_dev);
4232 up_write(&rbd_dev->lock_rwsem);
4233}
4234
4235static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4236{
4237 bool have_requests;
4238
4239 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4240 if (__rbd_is_lock_owner(rbd_dev))
4241 return;
4242
4243 spin_lock(&rbd_dev->lock_lists_lock);
4244 have_requests = !list_empty(&rbd_dev->acquiring_list);
4245 spin_unlock(&rbd_dev->lock_lists_lock);
4246 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4247 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4248 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4249 }
4250}
4251
4252static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4253 void **p)
4254{
4255 struct rbd_client_id cid = { 0 };
4256
4257 if (struct_v >= 2) {
4258 cid.gid = ceph_decode_64(p);
4259 cid.handle = ceph_decode_64(p);
4260 }
4261
4262 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4263 cid.handle);
4264 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4265 down_write(&rbd_dev->lock_rwsem);
4266 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4267 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4268 __func__, rbd_dev, cid.gid, cid.handle);
4269 } else {
4270 rbd_set_owner_cid(rbd_dev, &cid);
ed95b21a 4271 }
4272 downgrade_write(&rbd_dev->lock_rwsem);
4273 } else {
4274 down_read(&rbd_dev->lock_rwsem);
4275 }
4276
637cd060 4277 maybe_kick_acquire(rbd_dev);
4278 up_read(&rbd_dev->lock_rwsem);
4279}
4280
4281static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4282 void **p)
4283{
4284 struct rbd_client_id cid = { 0 };
4285
4286 if (struct_v >= 2) {
4287 cid.gid = ceph_decode_64(p);
4288 cid.handle = ceph_decode_64(p);
4289 }
4290
4291 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4292 cid.handle);
4293 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4294 down_write(&rbd_dev->lock_rwsem);
4295 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
8798d070 4296 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
4297 __func__, rbd_dev, cid.gid, cid.handle,
4298 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4299 } else {
4300 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
ed95b21a 4301 }
4302 downgrade_write(&rbd_dev->lock_rwsem);
4303 } else {
4304 down_read(&rbd_dev->lock_rwsem);
4305 }
4306
637cd060 4307 maybe_kick_acquire(rbd_dev);
4308 up_read(&rbd_dev->lock_rwsem);
4309}
4310
4311/*
4312 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4313 * ResponseMessage is needed.
4314 */
4315static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4316 void **p)
4317{
4318 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4319 struct rbd_client_id cid = { 0 };
3b77faa0 4320 int result = 1;
4321
4322 if (struct_v >= 2) {
4323 cid.gid = ceph_decode_64(p);
4324 cid.handle = ceph_decode_64(p);
4325 }
4326
4327 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4328 cid.handle);
4329 if (rbd_cid_equal(&cid, &my_cid))
3b77faa0 4330 return result;
4331
4332 down_read(&rbd_dev->lock_rwsem);
4333 if (__rbd_is_lock_owner(rbd_dev)) {
4334 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4335 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4336 goto out_unlock;
4337
4338 /*
4339 * encode ResponseMessage(0) so the peer can detect
4340 * a missing owner
4341 */
4342 result = 0;
4343
4344 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4345 if (!rbd_dev->opts->exclusive) {
4346 dout("%s rbd_dev %p queueing unlock_work\n",
4347 __func__, rbd_dev);
4348 queue_work(rbd_dev->task_wq,
4349 &rbd_dev->unlock_work);
4350 } else {
4351 /* refuse to release the lock */
4352 result = -EROFS;
4353 }
4354 }
4355 }
4356
4357out_unlock:
ed95b21a 4358 up_read(&rbd_dev->lock_rwsem);
3b77faa0 4359 return result;
4360}
4361
4362static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4363 u64 notify_id, u64 cookie, s32 *result)
4364{
4365 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4366 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4367 int buf_size = sizeof(buf);
4368 int ret;
4369
4370 if (result) {
4371 void *p = buf;
4372
4373 /* encode ResponseMessage */
4374 ceph_start_encoding(&p, 1, 1,
4375 buf_size - CEPH_ENCODING_START_BLK_LEN);
4376 ceph_encode_32(&p, *result);
4377 } else {
4378 buf_size = 0;
4379 }
b8d70035 4380
4381 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4382 &rbd_dev->header_oloc, notify_id, cookie,
ed95b21a 4383 buf, buf_size);
52bb1f9b 4384 if (ret)
4385 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4386}
4387
4388static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4389 u64 cookie)
4390{
4391 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4392 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4393}
4394
4395static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4396 u64 notify_id, u64 cookie, s32 result)
4397{
4398 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4399 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4400}
4401
4402static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4403 u64 notifier_id, void *data, size_t data_len)
4404{
4405 struct rbd_device *rbd_dev = arg;
4406 void *p = data;
4407 void *const end = p + data_len;
d4c2269b 4408 u8 struct_v = 0;
4409 u32 len;
4410 u32 notify_op;
4411 int ret;
4412
4413 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4414 __func__, rbd_dev, cookie, notify_id, data_len);
4415 if (data_len) {
4416 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4417 &struct_v, &len);
4418 if (ret) {
4419 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4420 ret);
4421 return;
4422 }
4423
4424 notify_op = ceph_decode_32(&p);
4425 } else {
4426 /* legacy notification for header updates */
4427 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4428 len = 0;
4429 }
4430
4431 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4432 switch (notify_op) {
4433 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4434 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4435 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4436 break;
4437 case RBD_NOTIFY_OP_RELEASED_LOCK:
4438 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4439 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4440 break;
4441 case RBD_NOTIFY_OP_REQUEST_LOCK:
4442 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4443 if (ret <= 0)
ed95b21a 4444 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3b77faa0 4445 cookie, ret);
4446 else
4447 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4448 break;
4449 case RBD_NOTIFY_OP_HEADER_UPDATE:
4450 ret = rbd_dev_refresh(rbd_dev);
4451 if (ret)
4452 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4453
4454 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4455 break;
4456 default:
4457 if (rbd_is_lock_owner(rbd_dev))
4458 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4459 cookie, -EOPNOTSUPP);
4460 else
4461 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4462 break;
4463 }
4464}
4465
4466static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4467
922dab61 4468static void rbd_watch_errcb(void *arg, u64 cookie, int err)
bb040aa0 4469{
922dab61 4470 struct rbd_device *rbd_dev = arg;
bb040aa0 4471
922dab61 4472 rbd_warn(rbd_dev, "encountered watch error: %d", err);
bb040aa0 4473
4474 down_write(&rbd_dev->lock_rwsem);
4475 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4476 up_write(&rbd_dev->lock_rwsem);
4477
4478 mutex_lock(&rbd_dev->watch_mutex);
4479 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4480 __rbd_unregister_watch(rbd_dev);
4481 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
bb040aa0 4482
99d16943 4483 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
bb040aa0 4484 }
99d16943 4485 mutex_unlock(&rbd_dev->watch_mutex);
4486}
4487
9969ebc5 4488/*
99d16943 4489 * watch_mutex must be locked
9969ebc5 4490 */
99d16943 4491static int __rbd_register_watch(struct rbd_device *rbd_dev)
4492{
4493 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
922dab61 4494 struct ceph_osd_linger_request *handle;
9969ebc5 4495
922dab61 4496 rbd_assert(!rbd_dev->watch_handle);
99d16943 4497 dout("%s rbd_dev %p\n", __func__, rbd_dev);
9969ebc5 4498
4499 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4500 &rbd_dev->header_oloc, rbd_watch_cb,
4501 rbd_watch_errcb, rbd_dev);
4502 if (IS_ERR(handle))
4503 return PTR_ERR(handle);
8eb87565 4504
922dab61 4505 rbd_dev->watch_handle = handle;
b30a01f2 4506 return 0;
4507}
4508
4509/*
4510 * watch_mutex must be locked
4511 */
4512static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
b30a01f2 4513{
4514 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4515 int ret;
b30a01f2 4516
4517 rbd_assert(rbd_dev->watch_handle);
4518 dout("%s rbd_dev %p\n", __func__, rbd_dev);
b30a01f2 4519
4520 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4521 if (ret)
4522 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
76756a51 4523
922dab61 4524 rbd_dev->watch_handle = NULL;
4525}
4526
4527static int rbd_register_watch(struct rbd_device *rbd_dev)
4528{
4529 int ret;
4530
4531 mutex_lock(&rbd_dev->watch_mutex);
4532 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4533 ret = __rbd_register_watch(rbd_dev);
4534 if (ret)
4535 goto out;
4536
4537 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4538 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4539
4540out:
4541 mutex_unlock(&rbd_dev->watch_mutex);
4542 return ret;
4543}
4544
4545static void cancel_tasks_sync(struct rbd_device *rbd_dev)
c525f036 4546{
4547 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4548
4549 cancel_work_sync(&rbd_dev->acquired_lock_work);
4550 cancel_work_sync(&rbd_dev->released_lock_work);
4551 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4552 cancel_work_sync(&rbd_dev->unlock_work);
4553}
4554
4555/*
4556 * header_rwsem must not be held to avoid a deadlock with
4557 * rbd_dev_refresh() when flushing notifies.
4558 */
4559static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4560{
4561 cancel_tasks_sync(rbd_dev);
4562
4563 mutex_lock(&rbd_dev->watch_mutex);
4564 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4565 __rbd_unregister_watch(rbd_dev);
4566 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4567 mutex_unlock(&rbd_dev->watch_mutex);
811c6688 4568
23edca86 4569 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
811c6688 4570 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4571}
4572
4573/*
4574 * lock_rwsem must be held for write
4575 */
4576static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4577{
4578 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4579 char cookie[32];
4580 int ret;
4581
4582 if (!rbd_quiesce_lock(rbd_dev))
4583 return;
4584
4585 format_lock_cookie(rbd_dev, cookie);
4586 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4587 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4588 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4589 RBD_LOCK_TAG, cookie);
4590 if (ret) {
4591 if (ret != -EOPNOTSUPP)
4592 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4593 ret);
4594
4595 /*
4596 * Lock cookie cannot be updated on older OSDs, so do
4597 * a manual release and queue an acquire.
4598 */
e1fddc8f 4599 __rbd_release_lock(rbd_dev);
a2b1da09 4600 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
14bb211d 4601 } else {
edd8ca80 4602 __rbd_lock(rbd_dev, cookie);
637cd060 4603 wake_lock_waiters(rbd_dev, 0);
4604 }
4605}
4606
4607static void rbd_reregister_watch(struct work_struct *work)
4608{
4609 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4610 struct rbd_device, watch_dwork);
4611 int ret;
4612
4613 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4614
4615 mutex_lock(&rbd_dev->watch_mutex);
4616 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4617 mutex_unlock(&rbd_dev->watch_mutex);
14bb211d 4618 return;
87c0fded 4619 }
4620
4621 ret = __rbd_register_watch(rbd_dev);
4622 if (ret) {
4623 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
0b98acd6 4624 if (ret != -EBLOCKLISTED && ret != -ENOENT) {
4625 queue_delayed_work(rbd_dev->task_wq,
4626 &rbd_dev->watch_dwork,
4627 RBD_RETRY_DELAY);
4628 mutex_unlock(&rbd_dev->watch_mutex);
4629 return;
87c0fded 4630 }
637cd060 4631
87c0fded 4632 mutex_unlock(&rbd_dev->watch_mutex);
4633 down_write(&rbd_dev->lock_rwsem);
4634 wake_lock_waiters(rbd_dev, ret);
4635 up_write(&rbd_dev->lock_rwsem);
14bb211d 4636 return;
4637 }
4638
4639 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4640 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4641 mutex_unlock(&rbd_dev->watch_mutex);
4642
4643 down_write(&rbd_dev->lock_rwsem);
4644 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4645 rbd_reacquire_lock(rbd_dev);
4646 up_write(&rbd_dev->lock_rwsem);
4647
4648 ret = rbd_dev_refresh(rbd_dev);
4649 if (ret)
f6870cc9 4650 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4651}
4652
36be9a76 4653/*
4654 * Synchronous osd object method call. Returns the number of bytes
 4655 * returned in the inbound buffer, or a negative error code.
4656 */
4657static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4658 struct ceph_object_id *oid,
4659 struct ceph_object_locator *oloc,
36be9a76 4660 const char *method_name,
4157976b 4661 const void *outbound,
36be9a76 4662 size_t outbound_size,
4157976b 4663 void *inbound,
e2a58ee5 4664 size_t inbound_size)
36be9a76 4665{
4666 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4667 struct page *req_page = NULL;
4668 struct page *reply_page;
4669 int ret;
4670
4671 /*
 4672 * Method calls are ultimately read operations. The result
 4673 * should be placed into the inbound buffer provided. Callers
 4674 * may also supply outbound data--parameters for the object
 4675 * method. Currently if this is present it will be a
 4676 * snapshot id.
36be9a76 4677 */
4678 if (outbound) {
4679 if (outbound_size > PAGE_SIZE)
4680 return -E2BIG;
36be9a76 4681
4682 req_page = alloc_page(GFP_KERNEL);
4683 if (!req_page)
4684 return -ENOMEM;
04017e29 4685
ecd4a68a 4686 memcpy(page_address(req_page), outbound, outbound_size);
04017e29 4687 }
36be9a76 4688
4689 reply_page = alloc_page(GFP_KERNEL);
4690 if (!reply_page) {
4691 if (req_page)
4692 __free_page(req_page);
4693 return -ENOMEM;
4694 }
57385b51 4695
4696 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4697 CEPH_OSD_FLAG_READ, req_page, outbound_size,
68ada915 4698 &reply_page, &inbound_size);
4699 if (!ret) {
4700 memcpy(inbound, page_address(reply_page), inbound_size);
4701 ret = inbound_size;
4702 }
36be9a76 4703
4704 if (req_page)
4705 __free_page(req_page);
4706 __free_page(reply_page);
4707 return ret;
4708}
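/*
 * Hypothetical caller sketch (not from this file) showing the calling
 * convention of rbd_obj_method_sync() above: fetch the image size for a
 * snapshot.  The "get_size" method name and the reply layout are
 * assumptions modeled on RBD's v2 image metadata protocol.
 */
static int example_get_size(struct rbd_device *rbd_dev, u64 snap_id,
			    u64 *size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		u8 order;
		__le64 size;
	} __packed size_buf = { 0 };
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_size",
				  &snapid, sizeof(snapid),
				  &size_buf, sizeof(size_buf));
	if (ret < 0)
		return ret;			/* OSD or transport error */
	if (ret < (int)sizeof(size_buf))
		return -ERANGE;			/* short reply */

	*size = le64_to_cpu(size_buf.size);	/* image size in bytes */
	return 0;
}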
4709
7ad18afa 4710static void rbd_queue_workfn(struct work_struct *work)
bf0d5f50 4711{
4712 struct rbd_img_request *img_request =
4713 container_of(work, struct rbd_img_request, work);
4714 struct rbd_device *rbd_dev = img_request->rbd_dev;
4715 enum obj_operation_type op_type = img_request->op_type;
4716 struct request *rq = blk_mq_rq_from_pdu(img_request);
4717 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4718 u64 length = blk_rq_bytes(rq);
4e752f0a 4719 u64 mapping_size;
4720 int result;
4721
bc1ecc65 4722 /* Ignore/skip any zero-length requests */
4723 if (!length) {
4724 dout("%s: zero-length request\n", __func__);
4725 result = 0;
59e542c8 4726 goto err_img_request;
bc1ecc65 4727 }
4dda41d3 4728
4729 blk_mq_start_request(rq);
4730
4731 down_read(&rbd_dev->header_rwsem);
4732 mapping_size = rbd_dev->mapping.size;
a52cc685 4733 rbd_img_capture_header(img_request);
4734 up_read(&rbd_dev->header_rwsem);
4735
4736 if (offset + length > mapping_size) {
bc1ecc65 4737 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4e752f0a 4738 length, mapping_size);
bc1ecc65 4739 result = -EIO;
a52cc685 4740 goto err_img_request;
bc1ecc65 4741 }
bf0d5f50 4742
4743 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4744 img_request, obj_op_name(op_type), offset, length);
4745
6484cbe9 4746 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
5a237819 4747 result = rbd_img_fill_nodata(img_request, offset, length);
90e98c52 4748 else
4749 result = rbd_img_fill_from_bio(img_request, offset, length,
4750 rq->bio);
0192ce2e 4751 if (result)
bc1ecc65 4752 goto err_img_request;
bf0d5f50 4753
e1fddc8f 4754 rbd_img_handle_request(img_request, 0);
bc1ecc65 4755 return;
bf0d5f50 4756
bc1ecc65 4757err_img_request:
679a97d2 4758 rbd_img_request_destroy(img_request);
4759 if (result)
4760 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
6d2940c8 4761 obj_op_name(op_type), length, offset, result);
2a842aca 4762 blk_mq_end_request(rq, errno_to_blk_status(result));
bc1ecc65 4763}
bf0d5f50 4764
fc17b653 4765static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
7ad18afa 4766 const struct blk_mq_queue_data *bd)
bc1ecc65 4767{
4768 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4769 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4770 enum obj_operation_type op_type;
bf0d5f50 4771
4772 switch (req_op(bd->rq)) {
4773 case REQ_OP_DISCARD:
4774 op_type = OBJ_OP_DISCARD;
4775 break;
4776 case REQ_OP_WRITE_ZEROES:
4777 op_type = OBJ_OP_ZEROOUT;
4778 break;
4779 case REQ_OP_WRITE:
4780 op_type = OBJ_OP_WRITE;
4781 break;
4782 case REQ_OP_READ:
4783 op_type = OBJ_OP_READ;
4784 break;
4785 default:
4786 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4787 return BLK_STS_IOERR;
4788 }
4789
4790 rbd_img_request_init(img_req, rbd_dev, op_type);
4791
4792 if (rbd_img_is_write(img_req)) {
4793 if (rbd_is_ro(rbd_dev)) {
4794 rbd_warn(rbd_dev, "%s on read-only mapping",
4795 obj_op_name(img_req->op_type));
4796 return BLK_STS_IOERR;
4797 }
4798 rbd_assert(!rbd_is_snap(rbd_dev));
4799 }
4800
4801 INIT_WORK(&img_req->work, rbd_queue_workfn);
4802 queue_work(rbd_wq, &img_req->work);
fc17b653 4803 return BLK_STS_OK;
4804}
4805
4806static void rbd_free_disk(struct rbd_device *rbd_dev)
4807{
8b9ab626 4808 put_disk(rbd_dev->disk);
5769ed0c 4809 blk_mq_free_tag_set(&rbd_dev->tag_set);
a0cab924 4810 rbd_dev->disk = NULL;
4811}
4812
788e2df3 4813static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4814 struct ceph_object_id *oid,
4815 struct ceph_object_locator *oloc,
4816 void *buf, int buf_len)
4817
4818{
4819 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4820 struct ceph_osd_request *req;
4821 struct page **pages;
4822 int num_pages = calc_pages_for(0, buf_len);
4823 int ret;
4824
4825 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4826 if (!req)
4827 return -ENOMEM;
788e2df3 4828
4829 ceph_oid_copy(&req->r_base_oid, oid);
4830 ceph_oloc_copy(&req->r_base_oloc, oloc);
4831 req->r_flags = CEPH_OSD_FLAG_READ;
430c28c3 4832
4833 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4834 if (IS_ERR(pages)) {
4835 ret = PTR_ERR(pages);
4836 goto out_req;
4837 }
1ceae7ef 4838
4839 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4840 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4841 true);
4842
4843 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4844 if (ret)
4845 goto out_req;
4846
a8af0d68 4847 ceph_osdc_start_request(osdc, req);
4848 ret = ceph_osdc_wait_request(osdc, req);
4849 if (ret >= 0)
4850 ceph_copy_from_page_vector(pages, buf, 0, ret);
788e2df3 4851
4852out_req:
4853 ceph_osdc_put_request(req);
4854 return ret;
4855}
4856
602adf40 4857/*
4858 * Read the complete header for the given rbd device. On successful
4859 * return, the rbd_dev->header field will contain up-to-date
4860 * information about the image.
602adf40 4861 */
99a41ebc 4862static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
602adf40 4863{
4156d998 4864 struct rbd_image_header_ondisk *ondisk = NULL;
50f7c4c9 4865 u32 snap_count = 0;
4866 u64 names_size = 0;
4867 u32 want_count;
4868 int ret;
602adf40 4869
00f1f36f 4870 /*
4156d998
AE
4871 * The complete header will include an array of its 64-bit
4872 * snapshot ids, followed by the names of those snapshots as
4873 * a contiguous block of NUL-terminated strings. Note that
4874 * the number of snapshots could change by the time we read
4875 * it in, in which case we re-read it.
00f1f36f 4876 */
4877 do {
4878 size_t size;
4879
4880 kfree(ondisk);
4881
4882 size = sizeof (*ondisk);
4883 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4884 size += names_size;
4885 ondisk = kmalloc(size, GFP_KERNEL);
4886 if (!ondisk)
662518b1 4887 return -ENOMEM;
4156d998 4888
fe5478e0
ID
4889 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4890 &rbd_dev->header_oloc, ondisk, size);
4156d998 4891 if (ret < 0)
662518b1 4892 goto out;
c0cd10db 4893 if ((size_t)ret < size) {
4156d998 4894 ret = -ENXIO;
4895 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4896 size, ret);
662518b1 4897 goto out;
4156d998
AE
4898 }
4899 if (!rbd_dev_ondisk_valid(ondisk)) {
4900 ret = -ENXIO;
06ecc6cb 4901 rbd_warn(rbd_dev, "invalid header");
662518b1 4902 goto out;
81e759fb 4903 }
602adf40 4904
4905 names_size = le64_to_cpu(ondisk->snap_names_len);
4906 want_count = snap_count;
4907 snap_count = le32_to_cpu(ondisk->snap_count);
4908 } while (snap_count != want_count);
00f1f36f 4909
4910 ret = rbd_header_from_disk(rbd_dev, ondisk);
4911out:
4912 kfree(ondisk);
4913
4914 return ret;
4915}
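/*
 * Worked example of the sizing loop above, for an image with two
 * snapshots named "a" and "backup" (struct sizes left symbolic; the
 * ondisk layouts live in rbd_types.h):
 *
 *   names_size = strlen("a") + 1 + strlen("backup") + 1 = 9
 *   size = sizeof (*ondisk) + 2 * sizeof (struct rbd_image_snap_ondisk) + 9
 *
 * If a snapshot is created between reads, the snap_count read back no
 * longer matches want_count and the do/while loop reallocates and
 * re-reads the header.
 */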
4916
4917static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4918{
4919 sector_t size;
4920
4921 /*
4922 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4923 * try to update its size. If REMOVING is set, updating size
4924 * is just useless work since the device can't be opened.
9875201e 4925 */
4926 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4927 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4928 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4929 dout("setting size to %llu sectors", (unsigned long long)size);
e864e49a 4930 set_capacity_and_notify(rbd_dev->disk, size);
4931 }
4932}
4933
cc4a38bd 4934static int rbd_dev_refresh(struct rbd_device *rbd_dev)
1fe5e993 4935{
e627db08 4936 u64 mapping_size;
4937 int ret;
4938
cfbf6377 4939 down_write(&rbd_dev->header_rwsem);
3b5cf2a2 4940 mapping_size = rbd_dev->mapping.size;
4941
4942 ret = rbd_dev_header_info(rbd_dev);
52bb1f9b 4943 if (ret)
73e39e4d 4944 goto out;
15228ede 4945
4946 /*
4947 * If there is a parent, see if it has disappeared due to the
4948 * mapped image getting flattened.
4949 */
4950 if (rbd_dev->parent) {
4951 ret = rbd_dev_v2_parent_info(rbd_dev);
4952 if (ret)
73e39e4d 4953 goto out;
4954 }
4955
4956 rbd_assert(!rbd_is_snap(rbd_dev));
4957 rbd_dev->mapping.size = rbd_dev->header.image_size;
15228ede 4958
73e39e4d 4959out:
cfbf6377 4960 up_write(&rbd_dev->header_rwsem);
73e39e4d 4961 if (!ret && mapping_size != rbd_dev->mapping.size)
9875201e 4962 rbd_dev_update_size(rbd_dev);
1fe5e993 4963
73e39e4d 4964 return ret;
4965}
4966
f363b089 4967static const struct blk_mq_ops rbd_mq_ops = {
7ad18afa 4968 .queue_rq = rbd_queue_rq,
4969};
4970
4971static int rbd_init_disk(struct rbd_device *rbd_dev)
4972{
4973 struct gendisk *disk;
4974 struct request_queue *q;
4975 unsigned int objset_bytes =
4976 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
7ad18afa 4977 int err;
602adf40 4978
4979 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4980 rbd_dev->tag_set.ops = &rbd_mq_ops;
b5584180 4981 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
7ad18afa 4982 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
56d18f62 4983 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
f9b6b98d 4984 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
59e542c8 4985 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
4986
4987 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4988 if (err)
195b1956 4989 return err;
029bcbd8 4990
4991 disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
4992 if (IS_ERR(disk)) {
4993 err = PTR_ERR(disk);
4994 goto out_tag_set;
4995 }
4996 q = disk->queue;
4997
4998 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4999 rbd_dev->dev_id);
5000 disk->major = rbd_dev->major;
5001 disk->first_minor = rbd_dev->minor;
1ebe2e5f 5002 if (single_major)
195b1956 5003 disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
1ebe2e5f 5004 else
195b1956 5005 disk->minors = RBD_MINORS_PER_MAJOR;
195b1956 5006 disk->fops = &rbd_bd_ops;
0077a500 5007 disk->private_data = rbd_dev;
7ad18afa 5008
8b904b5b 5009 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
d8a2c89c 5010 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
593a9e7b 5011
420efbdf 5012 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
0d9fde4f 5013 q->limits.max_sectors = queue_max_hw_sectors(q);
21acdf45 5014 blk_queue_max_segments(q, USHRT_MAX);
24f1df60 5015 blk_queue_max_segment_size(q, UINT_MAX);
5016 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5017 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
029bcbd8 5018
d9360540 5019 if (rbd_dev->opts->trim) {
16d80c54 5020 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5021 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
5022 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
5023 }
90e98c52 5024
bae818ee 5025 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
1cb039f3 5026 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
bae818ee 5027
602adf40 5028 rbd_dev->disk = disk;
602adf40 5029
602adf40 5030 return 0;
5031out_tag_set:
5032 blk_mq_free_tag_set(&rbd_dev->tag_set);
7ad18afa 5033 return err;
5034}
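/*
 * Example of the queue limits above for a default-format image
 * (assuming 4 MiB objects and stripe_count 1): objset_bytes is 4 MiB,
 * so max_hw_sectors becomes 8192 512-byte sectors, and discard and
 * write-zeroes requests are likewise capped at one object set.
 */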
5035
5036/*
5037 sysfs
5038*/
5039
5040static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5041{
5042 return container_of(dev, struct rbd_device, dev);
5043}
5044
5045static ssize_t rbd_size_show(struct device *dev,
5046 struct device_attribute *attr, char *buf)
5047{
593a9e7b 5048 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
a51aa0c0 5049
5050 return sprintf(buf, "%llu\n",
5051 (unsigned long long)rbd_dev->mapping.size);
5052}
5053
5054static ssize_t rbd_features_show(struct device *dev,
5055 struct device_attribute *attr, char *buf)
5056{
5057 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5058
fa58bcad 5059 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5060}
5061
5062static ssize_t rbd_major_show(struct device *dev,
5063 struct device_attribute *attr, char *buf)
5064{
593a9e7b 5065 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf40 5066
5067 if (rbd_dev->major)
5068 return sprintf(buf, "%d\n", rbd_dev->major);
5069
5070 return sprintf(buf, "(none)\n");
5071}
5072
5073static ssize_t rbd_minor_show(struct device *dev,
5074 struct device_attribute *attr, char *buf)
5075{
5076 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
fc71d833 5077
dd82fff1 5078 return sprintf(buf, "%d\n", rbd_dev->minor);
5079}
5080
5081static ssize_t rbd_client_addr_show(struct device *dev,
5082 struct device_attribute *attr, char *buf)
5083{
5084 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5085 struct ceph_entity_addr *client_addr =
5086 ceph_client_addr(rbd_dev->rbd_client->client);
5087
5088 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5089 le32_to_cpu(client_addr->nonce));
5090}
5091
5092static ssize_t rbd_client_id_show(struct device *dev,
5093 struct device_attribute *attr, char *buf)
602adf40 5094{
593a9e7b 5095 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 5096
1dbb4399 5097 return sprintf(buf, "client%lld\n",
033268a5 5098 ceph_client_gid(rbd_dev->rbd_client->client));
5099}
5100
5101static ssize_t rbd_cluster_fsid_show(struct device *dev,
5102 struct device_attribute *attr, char *buf)
5103{
5104 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5105
5106 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5107}
5108
5109static ssize_t rbd_config_info_show(struct device *dev,
5110 struct device_attribute *attr, char *buf)
5111{
5112 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5113
5114 if (!capable(CAP_SYS_ADMIN))
5115 return -EPERM;
5116
0d6d1e9c 5117 return sprintf(buf, "%s\n", rbd_dev->config_info);
5118}
5119
5120static ssize_t rbd_pool_show(struct device *dev,
5121 struct device_attribute *attr, char *buf)
602adf40 5122{
593a9e7b 5123 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 5124
0d7dbfce 5125 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5126}
5127
5128static ssize_t rbd_pool_id_show(struct device *dev,
5129 struct device_attribute *attr, char *buf)
5130{
5131 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5132
0d7dbfce 5133 return sprintf(buf, "%llu\n",
fc71d833 5134 (unsigned long long) rbd_dev->spec->pool_id);
5135}
5136
5137static ssize_t rbd_pool_ns_show(struct device *dev,
5138 struct device_attribute *attr, char *buf)
5139{
5140 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5141
5142 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5143}
5144
5145static ssize_t rbd_name_show(struct device *dev,
5146 struct device_attribute *attr, char *buf)
5147{
593a9e7b 5148 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 5149
5150 if (rbd_dev->spec->image_name)
5151 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5152
5153 return sprintf(buf, "(unknown)\n");
5154}
5155
5156static ssize_t rbd_image_id_show(struct device *dev,
5157 struct device_attribute *attr, char *buf)
5158{
5159 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5160
0d7dbfce 5161 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5162}
5163
5164/*
5165 * Shows the name of the currently-mapped snapshot (or
5166 * RBD_SNAP_HEAD_NAME for the base image).
5167 */
5168static ssize_t rbd_snap_show(struct device *dev,
5169 struct device_attribute *attr,
5170 char *buf)
5171{
593a9e7b 5172 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 5173
0d7dbfce 5174 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
dfc5606d
YS
5175}
5176
92a58671
MC
5177static ssize_t rbd_snap_id_show(struct device *dev,
5178 struct device_attribute *attr, char *buf)
5179{
5180 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5181
5182 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5183}
5184
86b00e0d 5185/*
ff96128f
ID
5186 * For a v2 image, shows the chain of parent images, separated by empty
5187 * lines. For v1 images or if there is no parent, shows "(no parent
5188 * image)".
86b00e0d
AE
5189 */
5190static ssize_t rbd_parent_show(struct device *dev,
ff96128f
ID
5191 struct device_attribute *attr,
5192 char *buf)
86b00e0d
AE
5193{
5194 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
ff96128f 5195 ssize_t count = 0;
86b00e0d 5196
ff96128f 5197 if (!rbd_dev->parent)
86b00e0d
AE
5198 return sprintf(buf, "(no parent image)\n");
5199
ff96128f
ID
5200 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5201 struct rbd_spec *spec = rbd_dev->parent_spec;
5202
5203 count += sprintf(&buf[count], "%s"
5204 "pool_id %llu\npool_name %s\n"
e92c0eaf 5205 "pool_ns %s\n"
ff96128f
ID
5206 "image_id %s\nimage_name %s\n"
5207 "snap_id %llu\nsnap_name %s\n"
5208 "overlap %llu\n",
5209 !count ? "" : "\n", /* first? */
5210 spec->pool_id, spec->pool_name,
e92c0eaf 5211 spec->pool_ns ?: "",
ff96128f
ID
5212 spec->image_id, spec->image_name ?: "(unknown)",
5213 spec->snap_id, spec->snap_name,
5214 rbd_dev->parent_overlap);
5215 }
5216
5217 return count;
86b00e0d
AE
5218}
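
/*
 * Example (illustrative, not part of the driver): reading the "parent"
 * attribute from user space.  The device index "0" is hypothetical;
 * each entry in the chain is a block of "key value" lines, and blocks
 * are separated by empty lines as formatted above.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/bus/rbd/devices/0/parent", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "pool_id 2", "overlap 1073741824" */
	fclose(f);
	return 0;
}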
5219
dfc5606d
YS
5220static ssize_t rbd_image_refresh(struct device *dev,
5221 struct device_attribute *attr,
5222 const char *buf,
5223 size_t size)
5224{
593a9e7b 5225 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
b813623a 5226 int ret;
602adf40 5227
f44d04e6
ID
5228 if (!capable(CAP_SYS_ADMIN))
5229 return -EPERM;
5230
cc4a38bd 5231 ret = rbd_dev_refresh(rbd_dev);
e627db08 5232 if (ret)
52bb1f9b 5233 return ret;
b813623a 5234
52bb1f9b 5235 return size;
dfc5606d 5236}
602adf40 5237
5657a819
JP
5238static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5239static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5240static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5241static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5242static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5243static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5244static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5245static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5246static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5247static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
b26c047b 5248static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5657a819
JP
5249static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5250static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5251static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5252static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5253static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5254static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
dfc5606d
YS
5255
5256static struct attribute *rbd_attrs[] = {
5257 &dev_attr_size.attr,
34b13184 5258 &dev_attr_features.attr,
dfc5606d 5259 &dev_attr_major.attr,
dd82fff1 5260 &dev_attr_minor.attr,
005a07bf 5261 &dev_attr_client_addr.attr,
dfc5606d 5262 &dev_attr_client_id.attr,
267fb90b 5263 &dev_attr_cluster_fsid.attr,
0d6d1e9c 5264 &dev_attr_config_info.attr,
dfc5606d 5265 &dev_attr_pool.attr,
9bb2f334 5266 &dev_attr_pool_id.attr,
b26c047b 5267 &dev_attr_pool_ns.attr,
dfc5606d 5268 &dev_attr_name.attr,
589d30e0 5269 &dev_attr_image_id.attr,
dfc5606d 5270 &dev_attr_current_snap.attr,
92a58671 5271 &dev_attr_snap_id.attr,
86b00e0d 5272 &dev_attr_parent.attr,
dfc5606d 5273 &dev_attr_refresh.attr,
dfc5606d
YS
5274 NULL
5275};
5276
5277static struct attribute_group rbd_attr_group = {
5278 .attrs = rbd_attrs,
5279};
5280
5281static const struct attribute_group *rbd_attr_groups[] = {
5282 &rbd_attr_group,
5283 NULL
5284};
5285
6cac4695 5286static void rbd_dev_release(struct device *dev);
dfc5606d 5287
b9942bc9 5288static const struct device_type rbd_device_type = {
dfc5606d
YS
5289 .name = "rbd",
5290 .groups = rbd_attr_groups,
6cac4695 5291 .release = rbd_dev_release,
dfc5606d
YS
5292};
5293
8b8fb99c
AE
5294static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5295{
5296 kref_get(&spec->kref);
5297
5298 return spec;
5299}
5300
5301static void rbd_spec_free(struct kref *kref);
5302static void rbd_spec_put(struct rbd_spec *spec)
5303{
5304 if (spec)
5305 kref_put(&spec->kref, rbd_spec_free);
5306}
5307
5308static struct rbd_spec *rbd_spec_alloc(void)
5309{
5310 struct rbd_spec *spec;
5311
5312 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5313 if (!spec)
5314 return NULL;
04077599
ID
5315
5316 spec->pool_id = CEPH_NOPOOL;
5317 spec->snap_id = CEPH_NOSNAP;
8b8fb99c
AE
5318 kref_init(&spec->kref);
5319
8b8fb99c
AE
5320 return spec;
5321}
5322
5323static void rbd_spec_free(struct kref *kref)
5324{
5325 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5326
5327 kfree(spec->pool_name);
b26c047b 5328 kfree(spec->pool_ns);
8b8fb99c
AE
5329 kfree(spec->image_id);
5330 kfree(spec->image_name);
5331 kfree(spec->snap_name);
5332 kfree(spec);
5333}
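
/*
 * Example (illustrative, user-space): the reference-counting scheme
 * that rbd_spec_get()/rbd_spec_put() implement with a kref above --
 * allocation starts the count at 1 (kref_init()), and the release
 * function runs on the final put.  The struct and helpers here are
 * simplified stand-ins, not kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;			/* stands in for struct kref */
};

static struct obj *obj_get(struct obj *o)
{
	o->refcount++;			/* kref_get() */
	return o;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {	/* kref_put() invokes the release fn */
		printf("releasing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 1;		/* kref_init() */
	obj_get(o);			/* cf. rbd_spec_get() */
	obj_put(o);			/* first put: object stays alive */
	obj_put(o);			/* final put: released */
	return 0;
}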
5334
1643dfa4 5335static void rbd_dev_free(struct rbd_device *rbd_dev)
dd5ac32d 5336{
99d16943 5337 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
ed95b21a 5338 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
dd5ac32d 5339
c41d13a3 5340 ceph_oid_destroy(&rbd_dev->header_oid);
6b6dddbe 5341 ceph_oloc_destroy(&rbd_dev->header_oloc);
0d6d1e9c 5342 kfree(rbd_dev->config_info);
c41d13a3 5343
dd5ac32d
ID
5344 rbd_put_client(rbd_dev->rbd_client);
5345 rbd_spec_put(rbd_dev->spec);
5346 kfree(rbd_dev->opts);
5347 kfree(rbd_dev);
1643dfa4
ID
5348}
5349
5350static void rbd_dev_release(struct device *dev)
5351{
5352 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5353 bool need_put = !!rbd_dev->opts;
5354
5355 if (need_put) {
5356 destroy_workqueue(rbd_dev->task_wq);
5357 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5358 }
5359
5360 rbd_dev_free(rbd_dev);
dd5ac32d
ID
5361
5362 /*
5363 * This is racy, but way better than dropping the module reference
5364 * outside of the release callback. The race window is pretty small,
5365 * so doing something similar to dm (dm-builtin.c) is overkill.
5366 */
5367 if (need_put)
5368 module_put(THIS_MODULE);
5369}
5370
f7c4d9b1 5371static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
c53d5893
AE
5372{
5373 struct rbd_device *rbd_dev;
5374
1643dfa4 5375 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
c53d5893
AE
5376 if (!rbd_dev)
5377 return NULL;
5378
5379 spin_lock_init(&rbd_dev->lock);
5380 INIT_LIST_HEAD(&rbd_dev->node);
c53d5893
AE
5381 init_rwsem(&rbd_dev->header_rwsem);
5382
7e97332e 5383 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
c41d13a3 5384 ceph_oid_init(&rbd_dev->header_oid);
431a02cd 5385 rbd_dev->header_oloc.pool = spec->pool_id;
b26c047b
ID
5386 if (spec->pool_ns) {
5387 WARN_ON(!*spec->pool_ns);
5388 rbd_dev->header_oloc.pool_ns =
5389 ceph_find_or_create_string(spec->pool_ns,
5390 strlen(spec->pool_ns));
5391 }
c41d13a3 5392
99d16943
ID
5393 mutex_init(&rbd_dev->watch_mutex);
5394 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5395 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5396
ed95b21a
ID
5397 init_rwsem(&rbd_dev->lock_rwsem);
5398 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5399 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5400 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5401 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5402 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
e1fddc8f 5403 spin_lock_init(&rbd_dev->lock_lists_lock);
637cd060 5404 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
e1fddc8f 5405 INIT_LIST_HEAD(&rbd_dev->running_list);
637cd060 5406 init_completion(&rbd_dev->acquire_wait);
e1fddc8f 5407 init_completion(&rbd_dev->releasing_wait);
ed95b21a 5408
22e8bd51 5409 spin_lock_init(&rbd_dev->object_map_lock);
ed95b21a 5410
dd5ac32d
ID
5411 rbd_dev->dev.bus = &rbd_bus_type;
5412 rbd_dev->dev.type = &rbd_device_type;
5413 rbd_dev->dev.parent = &rbd_root_dev;
dd5ac32d
ID
5414 device_initialize(&rbd_dev->dev);
5415
1643dfa4
ID
5416 return rbd_dev;
5417}
5418
5419/*
5420 * Create a mapping rbd_dev.
5421 */
5422static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5423 struct rbd_spec *spec,
5424 struct rbd_options *opts)
5425{
5426 struct rbd_device *rbd_dev;
5427
f7c4d9b1 5428 rbd_dev = __rbd_dev_create(spec);
1643dfa4
ID
5429 if (!rbd_dev)
5430 return NULL;
5431
1643dfa4
ID
5432 /* get an id and fill in device name */
5433 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5434 minor_to_rbd_dev_id(1 << MINORBITS),
5435 GFP_KERNEL);
5436 if (rbd_dev->dev_id < 0)
5437 goto fail_rbd_dev;
5438
5439 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5440 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5441 rbd_dev->name);
5442 if (!rbd_dev->task_wq)
5443 goto fail_dev_id;
dd5ac32d 5444
1643dfa4
ID
5445 /* we have a ref from do_rbd_add() */
5446 __module_get(THIS_MODULE);
dd5ac32d 5447
f7c4d9b1
ID
5448 rbd_dev->rbd_client = rbdc;
5449 rbd_dev->spec = spec;
5450 rbd_dev->opts = opts;
5451
1643dfa4 5452 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
c53d5893 5453 return rbd_dev;
1643dfa4
ID
5454
5455fail_dev_id:
5456 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5457fail_rbd_dev:
5458 rbd_dev_free(rbd_dev);
5459 return NULL;
c53d5893
AE
5460}
5461
5462static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5463{
dd5ac32d
ID
5464 if (rbd_dev)
5465 put_device(&rbd_dev->dev);
c53d5893
AE
5466}
5467
9d475de5
AE
5468/*
5469 * Get the size and object order for an image snapshot, or, if
5470 * snap_id is CEPH_NOSNAP, get this information for the base
5471 * image.
5472 */
5473static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5474 u8 *order, u64 *snap_size)
5475{
5476 __le64 snapid = cpu_to_le64(snap_id);
5477 int ret;
5478 struct {
5479 u8 order;
5480 __le64 size;
5481 } __attribute__ ((packed)) size_buf = { 0 };
5482
ecd4a68a
ID
5483 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5484 &rbd_dev->header_oloc, "get_size",
5485 &snapid, sizeof(snapid),
5486 &size_buf, sizeof(size_buf));
36be9a76 5487 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
9d475de5
AE
5488 if (ret < 0)
5489 return ret;
57385b51
AE
5490 if (ret < sizeof (size_buf))
5491 return -ERANGE;
9d475de5 5492
c3545579 5493 if (order) {
c86f86e9 5494 *order = size_buf.order;
c3545579
JD
5495 dout(" order %u", (unsigned int)*order);
5496 }
9d475de5
AE
5497 *snap_size = le64_to_cpu(size_buf.size);
5498
c3545579
JD
5499 dout(" snap_id 0x%016llx snap_size = %llu\n",
5500 (unsigned long long)snap_id,
57385b51 5501 (unsigned long long)*snap_size);
9d475de5
AE
5502
5503 return 0;
5504}
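
/*
 * Example (illustrative, user-space): decoding a packed little-endian
 * reply like the "get_size" buffer above ({ u8 order; __le64 size }).
 * Assembling the value byte by byte avoids alignment and host-
 * endianness pitfalls, which is what le64_to_cpu() handles in-kernel.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t get_le64(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* order 22 (4 MiB objects), size 0x40000000 (1 GiB) */
	const unsigned char reply[9] = { 22, 0, 0, 0, 0x40, 0, 0, 0, 0 };

	printf("order %u size %llu\n", (unsigned int)reply[0],
	       (unsigned long long)get_le64(reply + 1));
	return 0;
}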
5505
5506static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5507{
5508 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5509 &rbd_dev->header.obj_order,
5510 &rbd_dev->header.image_size);
5511}
5512
1e130199
AE
5513static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5514{
5435d206 5515 size_t size;
1e130199
AE
5516 void *reply_buf;
5517 int ret;
5518 void *p;
5519
5435d206
DY
5520 /* Response will be an encoded string, which includes a length */
5521 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5522 reply_buf = kzalloc(size, GFP_KERNEL);
1e130199
AE
5523 if (!reply_buf)
5524 return -ENOMEM;
5525
ecd4a68a
ID
5526 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5527 &rbd_dev->header_oloc, "get_object_prefix",
5435d206 5528 NULL, 0, reply_buf, size);
36be9a76 5529 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
1e130199
AE
5530 if (ret < 0)
5531 goto out;
5532
5533 p = reply_buf;
5534 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
57385b51
AE
5535 p + ret, NULL, GFP_NOIO);
5536 ret = 0;
1e130199
AE
5537
5538 if (IS_ERR(rbd_dev->header.object_prefix)) {
5539 ret = PTR_ERR(rbd_dev->header.object_prefix);
5540 rbd_dev->header.object_prefix = NULL;
5541 } else {
5542 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5543 }
1e130199
AE
5544out:
5545 kfree(reply_buf);
5546
5547 return ret;
5548}
5549
b1b5402a 5550static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
196e2d6d 5551 bool read_only, u64 *snap_features)
b1b5402a 5552{
196e2d6d
ID
5553 struct {
5554 __le64 snap_id;
5555 u8 read_only;
5556 } features_in;
b1b5402a
AE
5557 struct {
5558 __le64 features;
5559 __le64 incompat;
4157976b 5560 } __attribute__ ((packed)) features_buf = { 0 };
d3767f0f 5561 u64 unsup;
b1b5402a
AE
5562 int ret;
5563
196e2d6d
ID
5564 features_in.snap_id = cpu_to_le64(snap_id);
5565 features_in.read_only = read_only;
5566
ecd4a68a
ID
5567 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5568 &rbd_dev->header_oloc, "get_features",
196e2d6d 5569 &features_in, sizeof(features_in),
ecd4a68a 5570 &features_buf, sizeof(features_buf));
36be9a76 5571 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
b1b5402a
AE
5572 if (ret < 0)
5573 return ret;
57385b51
AE
5574 if (ret < sizeof (features_buf))
5575 return -ERANGE;
d889140c 5576
d3767f0f
ID
5577 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5578 if (unsup) {
5579 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5580 unsup);
b8f5c6ed 5581 return -ENXIO;
d3767f0f 5582 }
d889140c 5583
b1b5402a
AE
5584 *snap_features = le64_to_cpu(features_buf.features);
5585
5586 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
57385b51
AE
5587 (unsigned long long)snap_id,
5588 (unsigned long long)*snap_features,
5589 (unsigned long long)le64_to_cpu(features_buf.incompat));
b1b5402a
AE
5590
5591 return 0;
5592}
5593
5594static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5595{
5596 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
196e2d6d
ID
5597 rbd_is_ro(rbd_dev),
5598 &rbd_dev->header.features);
b1b5402a
AE
5599}
5600
22e8bd51
ID
5601/*
5602 * These are generic image flags, but since they are used only for
5603 * the object map, store them in rbd_dev->object_map_flags.
5604 *
5605 * For the same reason, this function is called only on object map
5606 * (re)load and not on header refresh.
5607 */
5608static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5609{
5610 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5611 __le64 flags;
5612 int ret;
5613
5614 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5615 &rbd_dev->header_oloc, "get_flags",
5616 &snapid, sizeof(snapid),
5617 &flags, sizeof(flags));
5618 if (ret < 0)
5619 return ret;
5620 if (ret < sizeof(flags))
5621 return -EBADMSG;
5622
5623 rbd_dev->object_map_flags = le64_to_cpu(flags);
5624 return 0;
5625}
5626
eb3b2d6b
ID
5627struct parent_image_info {
5628 u64 pool_id;
e92c0eaf 5629 const char *pool_ns;
eb3b2d6b
ID
5630 const char *image_id;
5631 u64 snap_id;
5632
e92c0eaf 5633 bool has_overlap;
eb3b2d6b
ID
5634 u64 overlap;
5635};
5636
e92c0eaf
ID
5637/*
5638 * The caller is responsible for @pii.
5639 */
5640static int decode_parent_image_spec(void **p, void *end,
5641 struct parent_image_info *pii)
5642{
5643 u8 struct_v;
5644 u32 struct_len;
5645 int ret;
5646
5647 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5648 &struct_v, &struct_len);
5649 if (ret)
5650 return ret;
5651
5652 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5653 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5654 if (IS_ERR(pii->pool_ns)) {
5655 ret = PTR_ERR(pii->pool_ns);
5656 pii->pool_ns = NULL;
5657 return ret;
5658 }
5659 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5660 if (IS_ERR(pii->image_id)) {
5661 ret = PTR_ERR(pii->image_id);
5662 pii->image_id = NULL;
5663 return ret;
5664 }
5665 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5666 return 0;
5667
5668e_inval:
5669 return -EINVAL;
5670}
5671
5672static int __get_parent_info(struct rbd_device *rbd_dev,
5673 struct page *req_page,
5674 struct page *reply_page,
5675 struct parent_image_info *pii)
5676{
5677 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5678 size_t reply_len = PAGE_SIZE;
5679 void *p, *end;
5680 int ret;
5681
5682 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5683 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
68ada915 5684 req_page, sizeof(u64), &reply_page, &reply_len);
e92c0eaf
ID
5685 if (ret)
5686 return ret == -EOPNOTSUPP ? 1 : ret;
5687
5688 p = page_address(reply_page);
5689 end = p + reply_len;
5690 ret = decode_parent_image_spec(&p, end, pii);
5691 if (ret)
5692 return ret;
5693
5694 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5695 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
68ada915 5696 req_page, sizeof(u64), &reply_page, &reply_len);
e92c0eaf
ID
5697 if (ret)
5698 return ret;
5699
5700 p = page_address(reply_page);
5701 end = p + reply_len;
5702 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5703 if (pii->has_overlap)
5704 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5705
5706 return 0;
5707
5708e_inval:
5709 return -EINVAL;
5710}
5711
eb3b2d6b
ID
5712/*
5713 * The caller is responsible for @pii.
5714 */
5715static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5716 struct page *req_page,
5717 struct page *reply_page,
5718 struct parent_image_info *pii)
5719{
5720 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5721 size_t reply_len = PAGE_SIZE;
5722 void *p, *end;
5723 int ret;
5724
5725 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5726 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
68ada915 5727 req_page, sizeof(u64), &reply_page, &reply_len);
eb3b2d6b
ID
5728 if (ret)
5729 return ret;
5730
5731 p = page_address(reply_page);
5732 end = p + reply_len;
5733 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5734 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5735 if (IS_ERR(pii->image_id)) {
5736 ret = PTR_ERR(pii->image_id);
5737 pii->image_id = NULL;
5738 return ret;
5739 }
5740 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
e92c0eaf 5741 pii->has_overlap = true;
eb3b2d6b
ID
5742 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5743
5744 return 0;
5745
5746e_inval:
5747 return -EINVAL;
5748}
5749
5750static int get_parent_info(struct rbd_device *rbd_dev,
5751 struct parent_image_info *pii)
5752{
5753 struct page *req_page, *reply_page;
5754 void *p;
5755 int ret;
5756
5757 req_page = alloc_page(GFP_KERNEL);
5758 if (!req_page)
5759 return -ENOMEM;
5760
5761 reply_page = alloc_page(GFP_KERNEL);
5762 if (!reply_page) {
5763 __free_page(req_page);
5764 return -ENOMEM;
5765 }
5766
5767 p = page_address(req_page);
5768 ceph_encode_64(&p, rbd_dev->spec->snap_id);
e92c0eaf
ID
5769 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5770 if (ret > 0)
5771 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5772 pii);
eb3b2d6b
ID
5773
5774 __free_page(req_page);
5775 __free_page(reply_page);
5776 return ret;
5777}
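
/*
 * Example (illustrative): the new-then-legacy dispatch performed by
 * get_parent_info() above.  __get_parent_info() maps -EOPNOTSUPP to a
 * positive return; this sketch expresses the same fallback with plain
 * errno values and hypothetical stand-in calls.
 */
#include <errno.h>
#include <stdio.h>

static int call_parent_get(void)	/* hypothetical: new method */
{
	return -EOPNOTSUPP;		/* pretend the OSD is too old */
}

static int call_get_parent(void)	/* hypothetical: legacy method */
{
	return 0;
}

static int fetch_parent_info(void)
{
	int ret = call_parent_get();

	if (ret == -EOPNOTSUPP)		/* fall back to the old method */
		ret = call_get_parent();
	return ret;			/* 0 on success, -errno on failure */
}

int main(void)
{
	printf("ret = %d\n", fetch_parent_info());
	return 0;
}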
5778
86b00e0d
AE
5779static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5780{
5781 struct rbd_spec *parent_spec;
eb3b2d6b 5782 struct parent_image_info pii = { 0 };
86b00e0d
AE
5783 int ret;
5784
5785 parent_spec = rbd_spec_alloc();
5786 if (!parent_spec)
5787 return -ENOMEM;
5788
eb3b2d6b
ID
5789 ret = get_parent_info(rbd_dev, &pii);
5790 if (ret)
86b00e0d 5791 goto out_err;
86b00e0d 5792
e92c0eaf
ID
5793 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5794 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5795 pii.has_overlap, pii.overlap);
86b00e0d 5796
e92c0eaf 5797 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
392a9dad
AE
5798 /*
5799 * Either the parent never existed, or we have a
5800 * record of it but the image got flattened so it no
5801 * longer has a parent. When the parent of a
5802 * layered image disappears we immediately set the
5803 * overlap to 0. The effect of this is that all new
5804 * requests will be treated as if the image had no
5805 * parent.
e92c0eaf
ID
5806 *
5807 * If !pii.has_overlap, the parent image spec is not
5808 * applicable. It's there to avoid duplication in each
5809 * snapshot record.
392a9dad
AE
5810 */
5811 if (rbd_dev->parent_overlap) {
5812 rbd_dev->parent_overlap = 0;
392a9dad
AE
5813 rbd_dev_parent_put(rbd_dev);
5814 pr_info("%s: clone image has been flattened\n",
5815 rbd_dev->disk->disk_name);
5816 }
5817
86b00e0d 5818 goto out; /* No parent? No problem. */
392a9dad 5819 }
86b00e0d 5820
0903e875
AE
5821 /* The ceph file layout needs to fit pool id in 32 bits */
5822
5823 ret = -EIO;
eb3b2d6b 5824 if (pii.pool_id > (u64)U32_MAX) {
9584d508 5825 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
eb3b2d6b 5826 (unsigned long long)pii.pool_id, U32_MAX);
86b00e0d
AE
5827 goto out_err;
5828 }
86b00e0d 5829
3b5cf2a2
AE
5830 /*
5831 * The parent won't change (except when the clone is
5832 * flattened, which is handled above). So we only need to
5833 * record the parent spec if we have not already done so.
5834 */
5835 if (!rbd_dev->parent_spec) {
eb3b2d6b 5836 parent_spec->pool_id = pii.pool_id;
e92c0eaf
ID
5837 if (pii.pool_ns && *pii.pool_ns) {
5838 parent_spec->pool_ns = pii.pool_ns;
5839 pii.pool_ns = NULL;
5840 }
eb3b2d6b
ID
5841 parent_spec->image_id = pii.image_id;
5842 pii.image_id = NULL;
5843 parent_spec->snap_id = pii.snap_id;
b26c047b 5844
70cf49cf
AE
5845 rbd_dev->parent_spec = parent_spec;
5846 parent_spec = NULL; /* rbd_dev now owns this */
3b5cf2a2
AE
5847 }
5848
5849 /*
cf32bd9c
ID
5850 * We always update the parent overlap. If it's zero we issue
5851 * a warning, as we will proceed as if there was no parent.
3b5cf2a2 5852 */
eb3b2d6b 5853 if (!pii.overlap) {
3b5cf2a2 5854 if (parent_spec) {
cf32bd9c
ID
5855 /* refresh, careful to warn just once */
5856 if (rbd_dev->parent_overlap)
5857 rbd_warn(rbd_dev,
5858 "clone now standalone (overlap became 0)");
3b5cf2a2 5859 } else {
cf32bd9c
ID
5860 /* initial probe */
5861 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
3b5cf2a2 5862 }
70cf49cf 5863 }
eb3b2d6b 5864 rbd_dev->parent_overlap = pii.overlap;
cf32bd9c 5865
86b00e0d
AE
5866out:
5867 ret = 0;
5868out_err:
e92c0eaf 5869 kfree(pii.pool_ns);
eb3b2d6b 5870 kfree(pii.image_id);
86b00e0d 5871 rbd_spec_put(parent_spec);
86b00e0d
AE
5872 return ret;
5873}
5874
cc070d59
AE
5875static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5876{
5877 struct {
5878 __le64 stripe_unit;
5879 __le64 stripe_count;
5880 } __attribute__ ((packed)) striping_info_buf = { 0 };
5881 size_t size = sizeof (striping_info_buf);
5882 void *p;
cc070d59
AE
5883 int ret;
5884
ecd4a68a
ID
5885 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5886 &rbd_dev->header_oloc, "get_stripe_unit_count",
5887 NULL, 0, &striping_info_buf, size);
cc070d59
AE
5888 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5889 if (ret < 0)
5890 return ret;
5891 if (ret < size)
5892 return -ERANGE;
5893
cc070d59 5894 p = &striping_info_buf;
b1331852
ID
5895 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
5896 rbd_dev->header.stripe_count = ceph_decode_64(&p);
cc070d59
AE
5897 return 0;
5898}
5899
7e97332e
ID
5900static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
5901{
5902 __le64 data_pool_id;
5903 int ret;
5904
5905 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5906 &rbd_dev->header_oloc, "get_data_pool",
5907 NULL, 0, &data_pool_id, sizeof(data_pool_id));
5908 if (ret < 0)
5909 return ret;
5910 if (ret < sizeof(data_pool_id))
5911 return -EBADMSG;
5912
5913 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
5914 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
5915 return 0;
5916}
5917
9e15b77d
AE
5918static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5919{
ecd4a68a 5920 CEPH_DEFINE_OID_ONSTACK(oid);
9e15b77d
AE
5921 size_t image_id_size;
5922 char *image_id;
5923 void *p;
5924 void *end;
5925 size_t size;
5926 void *reply_buf = NULL;
5927 size_t len = 0;
5928 char *image_name = NULL;
5929 int ret;
5930
5931 rbd_assert(!rbd_dev->spec->image_name);
5932
69e7a02f
AE
5933 len = strlen(rbd_dev->spec->image_id);
5934 image_id_size = sizeof (__le32) + len;
9e15b77d
AE
5935 image_id = kmalloc(image_id_size, GFP_KERNEL);
5936 if (!image_id)
5937 return NULL;
5938
5939 p = image_id;
4157976b 5940 end = image_id + image_id_size;
57385b51 5941 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
9e15b77d
AE
5942
5943 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5944 reply_buf = kmalloc(size, GFP_KERNEL);
5945 if (!reply_buf)
5946 goto out;
5947
ecd4a68a
ID
5948 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5949 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5950 "dir_get_name", image_id, image_id_size,
5951 reply_buf, size);
9e15b77d
AE
5952 if (ret < 0)
5953 goto out;
5954 p = reply_buf;
f40eb349
AE
5955 end = reply_buf + ret;
5956
9e15b77d
AE
5957 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5958 if (IS_ERR(image_name))
5959 image_name = NULL;
5960 else
5961 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5962out:
5963 kfree(reply_buf);
5964 kfree(image_id);
5965
5966 return image_name;
5967}
5968
2ad3d716
AE
5969static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5970{
5971 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5972 const char *snap_name;
5973 u32 which = 0;
5974
5975 /* Skip over names until we find the one we are looking for */
5976
5977 snap_name = rbd_dev->header.snap_names;
5978 while (which < snapc->num_snaps) {
5979 if (!strcmp(name, snap_name))
5980 return snapc->snaps[which];
5981 snap_name += strlen(snap_name) + 1;
5982 which++;
5983 }
5984 return CEPH_NOSNAP;
5985}
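
/*
 * Example (illustrative, user-space): walking a packed table of
 * NUL-terminated names, as rbd_v1_snap_id_by_name() does above.
 * Entry i of the name table pairs with entry i of the id array;
 * the names and ids here are made up.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char names[] = "snap1\0snap2\0snap3";
	const unsigned long long ids[] = { 11, 12, 13 };
	const char *p = names;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		if (!strcmp(p, "snap2")) {
			printf("snap2 -> id %llu\n", ids[i]);
			return 0;
		}
		p += strlen(p) + 1;	/* step past the name and its NUL */
	}
	return 1;
}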
5986
5987static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5988{
5989 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5990 u32 which;
5991 bool found = false;
5992 u64 snap_id;
5993
5994 for (which = 0; !found && which < snapc->num_snaps; which++) {
5995 const char *snap_name;
5996
5997 snap_id = snapc->snaps[which];
5998 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
efadc98a
JD
5999 if (IS_ERR(snap_name)) {
6000 /* ignore no-longer existing snapshots */
6001 if (PTR_ERR(snap_name) == -ENOENT)
6002 continue;
6003 else
6004 break;
6005 }
2ad3d716
AE
6006 found = !strcmp(name, snap_name);
6007 kfree(snap_name);
6008 }
6009 return found ? snap_id : CEPH_NOSNAP;
6010}
6011
6012/*
6013 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6014 * no snapshot by that name is found, or if an error occurs.
6015 */
6016static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6017{
6018 if (rbd_dev->image_format == 1)
6019 return rbd_v1_snap_id_by_name(rbd_dev, name);
6020
6021 return rbd_v2_snap_id_by_name(rbd_dev, name);
6022}
6023
9e15b77d 6024/*
04077599
ID
6025 * An image being mapped will have everything but the snap id.
6026 */
6027static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6028{
6029 struct rbd_spec *spec = rbd_dev->spec;
6030
6031 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6032 rbd_assert(spec->image_id && spec->image_name);
6033 rbd_assert(spec->snap_name);
6034
6035 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6036 u64 snap_id;
6037
6038 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6039 if (snap_id == CEPH_NOSNAP)
6040 return -ENOENT;
6041
6042 spec->snap_id = snap_id;
6043 } else {
6044 spec->snap_id = CEPH_NOSNAP;
6045 }
6046
6047 return 0;
6048}
6049
6050/*
6051 * A parent image will have all ids but none of the names.
e1d4213f 6052 *
04077599
ID
6053 * All names in an rbd spec are dynamically allocated. It's OK if we
6054 * can't figure out the name for an image id.
9e15b77d 6055 */
04077599 6056static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
9e15b77d 6057{
2e9f7f1c
AE
6058 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6059 struct rbd_spec *spec = rbd_dev->spec;
6060 const char *pool_name;
6061 const char *image_name;
6062 const char *snap_name;
9e15b77d
AE
6063 int ret;
6064
04077599
ID
6065 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6066 rbd_assert(spec->image_id);
6067 rbd_assert(spec->snap_id != CEPH_NOSNAP);
9e15b77d 6068
2e9f7f1c 6069 /* Get the pool name; we have to make our own copy of this */
9e15b77d 6070
2e9f7f1c
AE
6071 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6072 if (!pool_name) {
6073 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
935dc89f
AE
6074 return -EIO;
6075 }
2e9f7f1c
AE
6076 pool_name = kstrdup(pool_name, GFP_KERNEL);
6077 if (!pool_name)
9e15b77d
AE
6078 return -ENOMEM;
6079
6080 /* Fetch the image name; tolerate failure here */
6081
2e9f7f1c
AE
6082 image_name = rbd_dev_image_name(rbd_dev);
6083 if (!image_name)
06ecc6cb 6084 rbd_warn(rbd_dev, "unable to get image name");
9e15b77d 6085
04077599 6086 /* Fetch the snapshot name */
9e15b77d 6087
2e9f7f1c 6088 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
da6a6b63
JD
6089 if (IS_ERR(snap_name)) {
6090 ret = PTR_ERR(snap_name);
9e15b77d 6091 goto out_err;
2e9f7f1c
AE
6092 }
6093
6094 spec->pool_name = pool_name;
6095 spec->image_name = image_name;
6096 spec->snap_name = snap_name;
9e15b77d
AE
6097
6098 return 0;
04077599 6099
9e15b77d 6100out_err:
2e9f7f1c
AE
6101 kfree(image_name);
6102 kfree(pool_name);
9e15b77d
AE
6103 return ret;
6104}
6105
cc4a38bd 6106static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
35d489f9
AE
6107{
6108 size_t size;
6109 int ret;
6110 void *reply_buf;
6111 void *p;
6112 void *end;
6113 u64 seq;
6114 u32 snap_count;
6115 struct ceph_snap_context *snapc;
6116 u32 i;
6117
6118 /*
6119 * We'll need room for the seq value (maximum snapshot id),
6120 * snapshot count, and array of that many snapshot ids.
6121 * For now we have a fixed upper limit on the number we're
6122 * prepared to receive.
6123 */
6124 size = sizeof (__le64) + sizeof (__le32) +
6125 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6126 reply_buf = kzalloc(size, GFP_KERNEL);
6127 if (!reply_buf)
6128 return -ENOMEM;
6129
ecd4a68a
ID
6130 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6131 &rbd_dev->header_oloc, "get_snapcontext",
6132 NULL, 0, reply_buf, size);
36be9a76 6133 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
35d489f9
AE
6134 if (ret < 0)
6135 goto out;
6136
35d489f9 6137 p = reply_buf;
57385b51
AE
6138 end = reply_buf + ret;
6139 ret = -ERANGE;
35d489f9
AE
6140 ceph_decode_64_safe(&p, end, seq, out);
6141 ceph_decode_32_safe(&p, end, snap_count, out);
6142
6143 /*
6144 * Make sure the reported number of snapshot ids wouldn't go
6145 * beyond the end of our buffer. But before checking that,
6146 * make sure the computed size of the snapshot context we
6147 * allocate is representable in a size_t.
6148 */
6149 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6150 / sizeof (u64)) {
6151 ret = -EINVAL;
6152 goto out;
6153 }
6154 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6155 goto out;
468521c1 6156 ret = 0;
35d489f9 6157
812164f8 6158 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
35d489f9
AE
6159 if (!snapc) {
6160 ret = -ENOMEM;
6161 goto out;
6162 }
35d489f9 6163 snapc->seq = seq;
35d489f9
AE
6164 for (i = 0; i < snap_count; i++)
6165 snapc->snaps[i] = ceph_decode_64(&p);
6166
49ece554 6167 ceph_put_snap_context(rbd_dev->header.snapc);
35d489f9
AE
6168 rbd_dev->header.snapc = snapc;
6169
6170 dout(" snap context seq = %llu, snap_count = %u\n",
57385b51 6171 (unsigned long long)seq, (unsigned int)snap_count);
35d489f9
AE
6172out:
6173 kfree(reply_buf);
6174
57385b51 6175 return ret;
35d489f9
AE
6176}
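
/*
 * Example (illustrative, user-space): the two sanity checks applied
 * above before trusting an OSD-supplied snap_count -- first that the
 * snap context allocation size cannot overflow size_t, then that the
 * advertised id array actually fits in the received reply.  The
 * header struct is a simplified stand-in for ceph_snap_context.
 */
#include <stdint.h>
#include <stddef.h>

struct snapc_hdr {
	uint64_t seq;
};

int snap_count_ok(uint32_t snap_count, size_t bytes_left)
{
	/* would sizeof(hdr) + snap_count * 8 overflow a size_t? */
	if (snap_count > (SIZE_MAX - sizeof(struct snapc_hdr)) /
							sizeof(uint64_t))
		return 0;
	/* does the advertised id array fit in the reply buffer? */
	if (bytes_left / sizeof(uint64_t) < snap_count)
		return 0;
	return 1;
}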
6177
54cac61f
AE
6178static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6179 u64 snap_id)
b8b1e2db
AE
6180{
6181 size_t size;
6182 void *reply_buf;
54cac61f 6183 __le64 snapid;
b8b1e2db
AE
6184 int ret;
6185 void *p;
6186 void *end;
b8b1e2db
AE
6187 char *snap_name;
6188
6189 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6190 reply_buf = kmalloc(size, GFP_KERNEL);
6191 if (!reply_buf)
6192 return ERR_PTR(-ENOMEM);
6193
54cac61f 6194 snapid = cpu_to_le64(snap_id);
ecd4a68a
ID
6195 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6196 &rbd_dev->header_oloc, "get_snapshot_name",
6197 &snapid, sizeof(snapid), reply_buf, size);
36be9a76 6198 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
f40eb349
AE
6199 if (ret < 0) {
6200 snap_name = ERR_PTR(ret);
b8b1e2db 6201 goto out;
f40eb349 6202 }
b8b1e2db
AE
6203
6204 p = reply_buf;
f40eb349 6205 end = reply_buf + ret;
e5c35534 6206 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
f40eb349 6207 if (IS_ERR(snap_name))
b8b1e2db 6208 goto out;
b8b1e2db 6209
f40eb349 6210 dout(" snap_id 0x%016llx snap_name = %s\n",
54cac61f 6211 (unsigned long long)snap_id, snap_name);
b8b1e2db
AE
6212out:
6213 kfree(reply_buf);
6214
f40eb349 6215 return snap_name;
b8b1e2db
AE
6216}
6217
2df3fac7 6218static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
117973fb 6219{
2df3fac7 6220 bool first_time = rbd_dev->header.object_prefix == NULL;
117973fb 6221 int ret;
117973fb 6222
1617e40c
JD
6223 ret = rbd_dev_v2_image_size(rbd_dev);
6224 if (ret)
cfbf6377 6225 return ret;
1617e40c 6226
2df3fac7
AE
6227 if (first_time) {
6228 ret = rbd_dev_v2_header_onetime(rbd_dev);
6229 if (ret)
cfbf6377 6230 return ret;
2df3fac7
AE
6231 }
6232
cc4a38bd 6233 ret = rbd_dev_v2_snap_context(rbd_dev);
d194cd1d
ID
6234 if (ret && first_time) {
6235 kfree(rbd_dev->header.object_prefix);
6236 rbd_dev->header.object_prefix = NULL;
6237 }
117973fb
AE
6238
6239 return ret;
6240}
6241
a720ae09
ID
6242static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6243{
6244 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6245
6246 if (rbd_dev->image_format == 1)
6247 return rbd_dev_v1_header_info(rbd_dev);
6248
6249 return rbd_dev_v2_header_info(rbd_dev);
6250}
6251
e28fff26
AE
6252/*
6253 * Skips over white space at *buf, and updates *buf to point to the
6254 * first found non-space character (if any). Returns the length of
593a9e7b
AE
6255 * the token (string of non-white space characters) found. Note
6256 * that *buf must be terminated with '\0'.
e28fff26
AE
6257 */
6258static inline size_t next_token(const char **buf)
6259{
6260 /*
6261 * These are the characters that produce nonzero for
6262 * isspace() in the "C" and "POSIX" locales.
6263 */
435a120a 6264 static const char spaces[] = " \f\n\r\t\v";
e28fff26
AE
6265
6266 *buf += strspn(*buf, spaces); /* Find start of token */
6267
6268 return strcspn(*buf, spaces); /* Return token length */
6269}
6270
ea3352f4
AE
6271/*
6272 * Finds the next token in *buf, dynamically allocates a buffer big
6273 * enough to hold a copy of it, and copies the token into the new
6274 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6275 * that a duplicate buffer is created even for a zero-length token.
6276 *
6277 * Returns a pointer to the newly-allocated duplicate, or a null
6278 * pointer if memory for the duplicate was not available. If
6279 * the lenp argument is a non-null pointer, the length of the token
6280 * (not including the '\0') is returned in *lenp.
6281 *
6282 * If successful, the *buf pointer will be updated to point beyond
6283 * the end of the found token.
6284 *
6285 * Note: uses GFP_KERNEL for allocation.
6286 */
6287static inline char *dup_token(const char **buf, size_t *lenp)
6288{
6289 char *dup;
6290 size_t len;
6291
6292 len = next_token(buf);
4caf35f9 6293 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
ea3352f4
AE
6294 if (!dup)
6295 return NULL;
ea3352f4
AE
6296 *(dup + len) = '\0';
6297 *buf += len;
6298
6299 if (lenp)
6300 *lenp = len;
6301
6302 return dup;
6303}
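
/*
 * Example (illustrative, user-space): the strspn()/strcspn() scheme
 * used by next_token()/dup_token() above, applied to an "rbd add"
 * style buffer.  Each pass skips leading whitespace and yields the
 * length of the next whitespace-delimited token.
 */
#include <stdio.h>
#include <string.h>

static size_t next_tok(const char **buf)
{
	static const char spaces[] = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* find start of token */
	return strcspn(*buf, spaces);	/* return token length */
}

int main(void)
{
	const char *buf = "1.2.3.4:6789 name=admin rbd foo";
	size_t len;

	while ((len = next_tok(&buf)) != 0) {
		printf("token: %.*s\n", (int)len, buf);
		buf += len;
	}
	return 0;
}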
6304
82995cc6
DH
6305static int rbd_parse_param(struct fs_parameter *param,
6306 struct rbd_parse_opts_ctx *pctx)
6307{
6308 struct rbd_options *opt = pctx->opts;
6309 struct fs_parse_result result;
3fbb8d55 6310 struct p_log log = {.prefix = "rbd"};
82995cc6
DH
6311 int token, ret;
6312
6313 ret = ceph_parse_param(param, pctx->copts, NULL);
6314 if (ret != -ENOPARAM)
6315 return ret;
6316
d7167b14 6317 token = __fs_parse(&log, rbd_parameters, param, &result);
82995cc6
DH
6318 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6319 if (token < 0) {
2c3f3dc3
AV
6320 if (token == -ENOPARAM)
6321 return inval_plog(&log, "Unknown parameter '%s'",
6322 param->key);
82995cc6
DH
6323 return token;
6324 }
6325
6326 switch (token) {
6327 case Opt_queue_depth:
6328 if (result.uint_32 < 1)
6329 goto out_of_range;
6330 opt->queue_depth = result.uint_32;
6331 break;
6332 case Opt_alloc_size:
6333 if (result.uint_32 < SECTOR_SIZE)
6334 goto out_of_range;
2c3f3dc3
AV
6335 if (!is_power_of_2(result.uint_32))
6336 return inval_plog(&log, "alloc_size must be a power of 2");
82995cc6
DH
6337 opt->alloc_size = result.uint_32;
6338 break;
6339 case Opt_lock_timeout:
6340 /* 0 is "wait forever" (i.e. infinite timeout) */
6341 if (result.uint_32 > INT_MAX / 1000)
6342 goto out_of_range;
6343 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6344 break;
6345 case Opt_pool_ns:
6346 kfree(pctx->spec->pool_ns);
6347 pctx->spec->pool_ns = param->string;
6348 param->string = NULL;
6349 break;
dc1dad8e
ID
6350 case Opt_compression_hint:
6351 switch (result.uint_32) {
6352 case Opt_compression_hint_none:
6353 opt->alloc_hint_flags &=
6354 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6355 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6356 break;
6357 case Opt_compression_hint_compressible:
6358 opt->alloc_hint_flags |=
6359 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6360 opt->alloc_hint_flags &=
6361 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6362 break;
6363 case Opt_compression_hint_incompressible:
6364 opt->alloc_hint_flags |=
6365 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6366 opt->alloc_hint_flags &=
6367 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6368 break;
6369 default:
6370 BUG();
6371 }
6372 break;
82995cc6
DH
6373 case Opt_read_only:
6374 opt->read_only = true;
6375 break;
6376 case Opt_read_write:
6377 opt->read_only = false;
6378 break;
6379 case Opt_lock_on_read:
6380 opt->lock_on_read = true;
6381 break;
6382 case Opt_exclusive:
6383 opt->exclusive = true;
6384 break;
6385 case Opt_notrim:
6386 opt->trim = false;
6387 break;
6388 default:
6389 BUG();
6390 }
6391
6392 return 0;
6393
6394out_of_range:
2c3f3dc3 6395 return inval_plog(&log, "%s out of range", param->key);
82995cc6
DH
6396}
6397
6398/*
6399 * This duplicates most of generic_parse_monolithic(), untying it from
6400 * fs_context and skipping standard superblock and security options.
6401 */
6402static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6403{
6404 char *key;
6405 int ret = 0;
6406
6407 dout("%s '%s'\n", __func__, options);
6408 while ((key = strsep(&options, ",")) != NULL) {
6409 if (*key) {
6410 struct fs_parameter param = {
6411 .key = key,
0f89589a 6412 .type = fs_value_is_flag,
82995cc6
DH
6413 };
6414 char *value = strchr(key, '=');
6415 size_t v_len = 0;
6416
6417 if (value) {
6418 if (value == key)
6419 continue;
6420 *value++ = 0;
6421 v_len = strlen(value);
82995cc6
DH
6422 param.string = kmemdup_nul(value, v_len,
6423 GFP_KERNEL);
6424 if (!param.string)
6425 return -ENOMEM;
0f89589a 6426 param.type = fs_value_is_string;
82995cc6
DH
6427 }
6428 param.size = v_len;
6429
6430 ret = rbd_parse_param(&param, pctx);
6431 kfree(param.string);
6432 if (ret)
6433 break;
6434 }
6435 }
6436
6437 return ret;
6438}
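
/*
 * Example (illustrative, user-space): the strsep()-based splitting
 * performed by rbd_parse_options() above -- comma-separated entries,
 * each optionally of the form key=value.  strsep() is available in
 * glibc/BSD libcs; the option string here is made up.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char options[] = "read_only,queue_depth=128,pool_ns=myns";
	char *opts = options;
	char *key;

	while ((key = strsep(&opts, ",")) != NULL) {
		char *value = strchr(key, '=');

		if (!*key || value == key)
			continue;	/* empty entry or missing key */
		if (value)
			*value++ = '\0';
		printf("key '%s' value '%s'\n",
		       key, value ? value : "(flag)");
	}
	return 0;
}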
6439
a725f65e 6440/*
859c31df
AE
6441 * Parse the options provided for an "rbd add" (i.e., rbd image
6442 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6443 * and the data written is passed here via a NUL-terminated buffer.
6444 * Returns 0 if successful or an error code otherwise.
d22f76e7 6445 *
859c31df
AE
6446 * The information extracted from these options is recorded in
6447 * the other parameters which return dynamically-allocated
6448 * structures:
6449 * ceph_opts
6450 * The address of a pointer that will refer to a ceph options
6451 * structure. Caller must release the returned pointer using
6452 * ceph_destroy_options() when it is no longer needed.
6453 * rbd_opts
6454 * Address of an rbd options pointer. Fully initialized by
6455 * this function; caller must release with kfree().
6456 * spec
6457 * Address of an rbd image specification pointer. Fully
6458 * initialized by this function based on parsed options.
6459 * Caller must release with rbd_spec_put().
6460 *
6461 * The options passed take this form:
6462 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
6463 * where:
6464 * <mon_addrs>
6465 * A comma-separated list of one or more monitor addresses.
6466 * A monitor address is an ip address, optionally followed
6467 * by a port number (separated by a colon).
6468 * I.e.: ip1[:port1][,ip2[:port2]...]
6469 * <options>
6470 * A comma-separated list of ceph and/or rbd options.
6471 * <pool_name>
6472 * The name of the rados pool containing the rbd image.
6473 * <image_name>
6474 * The name of the image in that pool to map.
6475 * <snap_name>
6476 * An optional snapshot name. If provided, the mapping will
6477 * present data from the image as of the time that snapshot was
6478 * created. The image head is used if no snapshot name is
6479 * provided. Snapshot mappings are always read-only.
a725f65e 6480 */
859c31df 6481static int rbd_add_parse_args(const char *buf,
dc79b113 6482 struct ceph_options **ceph_opts,
859c31df
AE
6483 struct rbd_options **opts,
6484 struct rbd_spec **rbd_spec)
e28fff26 6485{
d22f76e7 6486 size_t len;
859c31df 6487 char *options;
0ddebc0c 6488 const char *mon_addrs;
ecb4dc22 6489 char *snap_name;
0ddebc0c 6490 size_t mon_addrs_size;
82995cc6 6491 struct rbd_parse_opts_ctx pctx = { 0 };
dc79b113 6492 int ret;
e28fff26
AE
6493
6494 /* The first four tokens are required */
6495
7ef3214a 6496 len = next_token(&buf);
4fb5d671
AE
6497 if (!len) {
6498 rbd_warn(NULL, "no monitor address(es) provided");
6499 return -EINVAL;
6500 }
0ddebc0c 6501 mon_addrs = buf;
82995cc6 6502 mon_addrs_size = len;
7ef3214a 6503 buf += len;
a725f65e 6504
dc79b113 6505 ret = -EINVAL;
f28e565a
AE
6506 options = dup_token(&buf, NULL);
6507 if (!options)
dc79b113 6508 return -ENOMEM;
4fb5d671
AE
6509 if (!*options) {
6510 rbd_warn(NULL, "no options provided");
6511 goto out_err;
6512 }
e28fff26 6513
c300156b
ID
6514 pctx.spec = rbd_spec_alloc();
6515 if (!pctx.spec)
f28e565a 6516 goto out_mem;
859c31df 6517
c300156b
ID
6518 pctx.spec->pool_name = dup_token(&buf, NULL);
6519 if (!pctx.spec->pool_name)
859c31df 6520 goto out_mem;
c300156b 6521 if (!*pctx.spec->pool_name) {
4fb5d671
AE
6522 rbd_warn(NULL, "no pool name provided");
6523 goto out_err;
6524 }
e28fff26 6525
c300156b
ID
6526 pctx.spec->image_name = dup_token(&buf, NULL);
6527 if (!pctx.spec->image_name)
f28e565a 6528 goto out_mem;
c300156b 6529 if (!*pctx.spec->image_name) {
4fb5d671
AE
6530 rbd_warn(NULL, "no image name provided");
6531 goto out_err;
6532 }
d4b125e9 6533
f28e565a
AE
6534 /*
6535 * Snapshot name is optional; default is to use "-"
6536 * (indicating the head/no snapshot).
6537 */
3feeb894 6538 len = next_token(&buf);
820a5f3e 6539 if (!len) {
3feeb894
AE
6540 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6541 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
f28e565a 6542 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
dc79b113 6543 ret = -ENAMETOOLONG;
f28e565a 6544 goto out_err;
849b4260 6545 }
ecb4dc22
AE
6546 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6547 if (!snap_name)
f28e565a 6548 goto out_mem;
ecb4dc22 6549 *(snap_name + len) = '\0';
c300156b 6550 pctx.spec->snap_name = snap_name;
e5c35534 6551
82995cc6
DH
6552 pctx.copts = ceph_alloc_options();
6553 if (!pctx.copts)
6554 goto out_mem;
6555
0ddebc0c 6556 /* Initialize all rbd options to the defaults */
e28fff26 6557
c300156b
ID
6558 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6559 if (!pctx.opts)
4e9afeba
AE
6560 goto out_mem;
6561
c300156b
ID
6562 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6563 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
0c93e1b7 6564 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
c300156b
ID
6565 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6566 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6567 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6568 pctx.opts->trim = RBD_TRIM_DEFAULT;
d22f76e7 6569
2d7c86a8
VS
6570 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL,
6571 ',');
82995cc6 6572 if (ret)
dc79b113 6573 goto out_err;
859c31df 6574
82995cc6
DH
6575 ret = rbd_parse_options(options, &pctx);
6576 if (ret)
6577 goto out_err;
6578
6579 *ceph_opts = pctx.copts;
c300156b
ID
6580 *opts = pctx.opts;
6581 *rbd_spec = pctx.spec;
82995cc6 6582 kfree(options);
dc79b113 6583 return 0;
82995cc6 6584
f28e565a 6585out_mem:
dc79b113 6586 ret = -ENOMEM;
d22f76e7 6587out_err:
c300156b 6588 kfree(pctx.opts);
82995cc6 6589 ceph_destroy_options(pctx.copts);
c300156b 6590 rbd_spec_put(pctx.spec);
f28e565a 6591 kfree(options);
dc79b113 6592 return ret;
a725f65e
AE
6593}
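
/*
 * Example (illustrative, user-space): composing an "rbd add" request
 * in the format parsed above,
 *     <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * The monitor address and credentials are hypothetical, and writing
 * to /sys/bus/rbd/add requires root.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/rbd/add", "w");

	if (!f)
		return 1;
	/* map image "foo" from pool "rbd", read-only, head (no snapshot) */
	fprintf(f, "1.2.3.4:6789 name=admin,read_only rbd foo");
	fclose(f);
	return 0;
}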
6594
e010dd0a
ID
6595static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6596{
6597 down_write(&rbd_dev->lock_rwsem);
6598 if (__rbd_is_lock_owner(rbd_dev))
e1fddc8f 6599 __rbd_release_lock(rbd_dev);
e010dd0a
ID
6600 up_write(&rbd_dev->lock_rwsem);
6601}
6602
637cd060
ID
6603/*
6604 * If the wait is interrupted, an error is returned even if the lock
6605 * was successfully acquired. rbd_dev_image_unlock() will release it
6606 * if needed.
6607 */
e010dd0a
ID
6608static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6609{
637cd060 6610 long ret;
2f18d466 6611
e010dd0a 6612 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
637cd060
ID
6613 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6614 return 0;
6615
e010dd0a
ID
6616 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6617 return -EINVAL;
6618 }
6619
3fe69921 6620 if (rbd_is_ro(rbd_dev))
637cd060
ID
6621 return 0;
6622
6623 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6624 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6625 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6626 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
25e6be21 6627 if (ret > 0) {
637cd060 6628 ret = rbd_dev->acquire_err;
25e6be21
DY
6629 } else {
6630 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6631 if (!ret)
6632 ret = -ETIMEDOUT;
637cd060 6633
9d01e07f 6634 rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
e010dd0a 6635 }
9d01e07f
ID
6636 if (ret)
6637 return ret;
e010dd0a 6638
637cd060
ID
6639 /*
6640 * The lock may have been released by now, unless automatic lock
6641 * transitions are disabled.
6642 */
6643 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
e010dd0a
ID
6644 return 0;
6645}
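
/*
 * Example (illustrative): the wait_for_completion_killable_timeout()
 * return convention handled above -- positive means completed with
 * time to spare, zero means the timeout expired, negative means a
 * fatal signal interrupted the wait.  recorded_err plays the role of
 * rbd_dev->acquire_err.
 */
#include <errno.h>

int wait_result_to_errno(long ret, int recorded_err)
{
	if (ret > 0)
		return recorded_err;	/* completed; use recorded result */
	if (ret == 0)
		return -ETIMEDOUT;	/* timer expired */
	return (int)ret;		/* interrupted (-ERESTARTSYS) */
}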
6646
589d30e0
AE
6647/*
6648 * An rbd format 2 image has a unique identifier, distinct from the
6649 * name given to it by the user. Internally, that identifier is
6650 * what's used to specify the names of objects related to the image.
6651 *
6652 * A special "rbd id" object is used to map an rbd image name to its
6653 * id. If that object doesn't exist, then there is no v2 rbd image
6654 * with the supplied name.
6655 *
6656 * This function will record the given rbd_dev's image_id field if
6657 * it can be determined, and in that case will return 0. If any
6658 * errors occur a negative errno will be returned and the rbd_dev's
6659 * image_id field will be unchanged (and should be NULL).
6660 */
6661static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6662{
6663 int ret;
6664 size_t size;
ecd4a68a 6665 CEPH_DEFINE_OID_ONSTACK(oid);
589d30e0 6666 void *response;
c0fba368 6667 char *image_id;
2f82ee54 6668
2c0d0a10
AE
6669 /*
6670 * When probing a parent image, the image id is already
6671 * known (and the image name likely is not). There's no
c0fba368
AE
6672 * need to fetch the image id again in this case. We
6673 * do still need to set the image format though.
2c0d0a10 6674 */
c0fba368
AE
6675 if (rbd_dev->spec->image_id) {
6676 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6677
2c0d0a10 6678 return 0;
c0fba368 6679 }
2c0d0a10 6680
589d30e0
AE
6681 /*
6682 * First, see if the format 2 image id file exists, and if
6683 * so, get the image's persistent id from it.
6684 */
ecd4a68a
ID
6685 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6686 rbd_dev->spec->image_name);
6687 if (ret)
6688 return ret;
6689
6690 dout("rbd id object name is %s\n", oid.name);
589d30e0
AE
6691
6692 /* Response will be an encoded string, which includes a length */
589d30e0
AE
6693 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6694 response = kzalloc(size, GFP_NOIO);
6695 if (!response) {
6696 ret = -ENOMEM;
6697 goto out;
6698 }
6699
c0fba368
AE
6700 /* If it doesn't exist we'll assume it's a format 1 image */
6701
ecd4a68a
ID
6702 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6703 "get_id", NULL, 0,
5435d206 6704 response, size);
36be9a76 6705 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
c0fba368
AE
6706 if (ret == -ENOENT) {
6707 image_id = kstrdup("", GFP_KERNEL);
6708 ret = image_id ? 0 : -ENOMEM;
6709 if (!ret)
6710 rbd_dev->image_format = 1;
7dd440c9 6711 } else if (ret >= 0) {
c0fba368
AE
6712 void *p = response;
6713
6714 image_id = ceph_extract_encoded_string(&p, p + ret,
979ed480 6715 NULL, GFP_NOIO);
461f758a 6716 ret = PTR_ERR_OR_ZERO(image_id);
c0fba368
AE
6717 if (!ret)
6718 rbd_dev->image_format = 2;
c0fba368
AE
6719 }
6720
6721 if (!ret) {
6722 rbd_dev->spec->image_id = image_id;
6723 dout("image_id is %s\n", image_id);
589d30e0
AE
6724 }
6725out:
6726 kfree(response);
ecd4a68a 6727 ceph_oid_destroy(&oid);
589d30e0
AE
6728 return ret;
6729}
6730
3abef3b3
AE
6731/*
6732 * Undo whatever state changes were made by a v1 or v2 header info
6733 * call.
6734 */
6fd48b3b
AE
6735static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6736{
6737 struct rbd_image_header *header;
6738
e69b8d41 6739 rbd_dev_parent_put(rbd_dev);
22e8bd51 6740 rbd_object_map_free(rbd_dev);
da5ef6be 6741 rbd_dev_mapping_clear(rbd_dev);
6fd48b3b
AE
6742
6743 /* Free dynamic fields from the header, then zero it out */
6744
6745 header = &rbd_dev->header;
812164f8 6746 ceph_put_snap_context(header->snapc);
6fd48b3b
AE
6747 kfree(header->snap_sizes);
6748 kfree(header->snap_names);
6749 kfree(header->object_prefix);
6750 memset(header, 0, sizeof (*header));
6751}
6752
2df3fac7 6753static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
a30b71b9
AE
6754{
6755 int ret;
a30b71b9 6756
1e130199 6757 ret = rbd_dev_v2_object_prefix(rbd_dev);
57385b51 6758 if (ret)
b1b5402a
AE
6759 goto out_err;
6760
2df3fac7
AE
6761 /*
6762 * Get and check the features for the image. Currently the
6763 * features are assumed to never change.
6764 */
b1b5402a 6765 ret = rbd_dev_v2_features(rbd_dev);
57385b51 6766 if (ret)
9d475de5 6767 goto out_err;
35d489f9 6768
cc070d59
AE
6769 /* If the image supports fancy striping, get its parameters */
6770
6771 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
6772 ret = rbd_dev_v2_striping_info(rbd_dev);
6773 if (ret < 0)
6774 goto out_err;
6775 }
a30b71b9 6776
7e97332e
ID
6777 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
6778 ret = rbd_dev_v2_data_pool(rbd_dev);
6779 if (ret)
6780 goto out_err;
6781 }
6782
263423f8 6783 rbd_init_layout(rbd_dev);
35152979 6784 return 0;
263423f8 6785
9d475de5 6786out_err:
642a2537 6787 rbd_dev->header.features = 0;
1e130199
AE
6788 kfree(rbd_dev->header.object_prefix);
6789 rbd_dev->header.object_prefix = NULL;
9d475de5 6790 return ret;
a30b71b9
AE
6791}
6792
6d69bb53
ID
6793/*
6794 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6795 * rbd_dev_image_probe() recursion depth, which means it's also the
6796 * length of the already discovered part of the parent chain.
6797 */
6798static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
83a06263 6799{
2f82ee54 6800 struct rbd_device *parent = NULL;
124afba2
AE
6801 int ret;
6802
6803 if (!rbd_dev->parent_spec)
6804 return 0;
124afba2 6805
6d69bb53
ID
6806 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6807 pr_info("parent chain is too long (%d)\n", depth);
6808 ret = -EINVAL;
6809 goto out_err;
6810 }
6811
f7c4d9b1 6812 parent = __rbd_dev_create(rbd_dev->parent_spec);
1f2c6651
ID
6813 if (!parent) {
6814 ret = -ENOMEM;
124afba2 6815 goto out_err;
1f2c6651
ID
6816 }
6817
6818 /*
6819 * Images related by parent/child relationships always share
6820 * rbd_client and spec/parent_spec, so bump their refcounts.
6821 */
f7c4d9b1
ID
6822 parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
6823 parent->spec = rbd_spec_get(rbd_dev->parent_spec);
124afba2 6824
39258aa2
ID
6825 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6826
6d69bb53 6827 ret = rbd_dev_image_probe(parent, depth);
124afba2
AE
6828 if (ret < 0)
6829 goto out_err;
1f2c6651 6830
124afba2 6831 rbd_dev->parent = parent;
a2acd00e 6832 atomic_set(&rbd_dev->parent_ref, 1);
124afba2 6833 return 0;
1f2c6651 6834
124afba2 6835out_err:
1f2c6651 6836 rbd_dev_unparent(rbd_dev);
1761b229 6837 rbd_dev_destroy(parent);
124afba2
AE
6838 return ret;
6839}
6840
5769ed0c
ID
6841static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6842{
6843 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5769ed0c
ID
6844 rbd_free_disk(rbd_dev);
6845 if (!single_major)
6846 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6847}
6848
811c6688
ID
6849/*
6850 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6851 * upon return.
6852 */
200a6a8b 6853static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
124afba2 6854{
83a06263 6855 int ret;
d1cf5788 6856
9b60e70b 6857 /* Record our major and minor device numbers. */
83a06263 6858
9b60e70b
ID
6859 if (!single_major) {
6860 ret = register_blkdev(0, rbd_dev->name);
6861 if (ret < 0)
1643dfa4 6862 goto err_out_unlock;
9b60e70b
ID
6863
6864 rbd_dev->major = ret;
6865 rbd_dev->minor = 0;
6866 } else {
6867 rbd_dev->major = rbd_major;
6868 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6869 }
83a06263
AE
6870
6871 /* Set up the blkdev mapping. */
6872
6873 ret = rbd_init_disk(rbd_dev);
6874 if (ret)
6875 goto err_out_blkdev;
6876
f35a4dee 6877 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
39258aa2 6878 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
f35a4dee 6879
5769ed0c 6880 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
f35a4dee 6881 if (ret)
da5ef6be 6882 goto err_out_disk;
83a06263 6883
129b79d4 6884 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
811c6688 6885 up_write(&rbd_dev->header_rwsem);
5769ed0c 6886 return 0;
2f82ee54 6887
83a06263
AE
6888err_out_disk:
6889 rbd_free_disk(rbd_dev);
6890err_out_blkdev:
9b60e70b
ID
6891 if (!single_major)
6892 unregister_blkdev(rbd_dev->major, rbd_dev->name);
811c6688
ID
6893err_out_unlock:
6894 up_write(&rbd_dev->header_rwsem);
83a06263
AE
6895 return ret;
6896}
6897
332bb12d
AE
6898static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6899{
6900 struct rbd_spec *spec = rbd_dev->spec;
c41d13a3 6901 int ret;
332bb12d
AE
6902
6903 /* Record the header object name for this rbd image. */
6904
6905 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
332bb12d 6906 if (rbd_dev->image_format == 1)
c41d13a3
ID
6907 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6908 spec->image_name, RBD_SUFFIX);
332bb12d 6909 else
c41d13a3
ID
6910 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6911 RBD_HEADER_PREFIX, spec->image_id);
332bb12d 6912
c41d13a3 6913 return ret;
332bb12d
AE
6914}
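
/*
 * Example (illustrative, user-space): the metadata object names this
 * driver derives.  For a format 1 image named "foo" the header object
 * is "foo.rbd" (RBD_SUFFIX); for a format 2 image with hypothetical
 * id "1234abcd" it is "rbd_header.1234abcd" (RBD_HEADER_PREFIX); the
 * name-to-id mapping probed earlier lives in "rbd_id.foo"
 * (RBD_ID_PREFIX).  All three prefixes come from rbd_types.h.
 */
#include <stdio.h>

int main(void)
{
	printf("v1 header: %s%s\n", "foo", ".rbd");
	printf("v2 header: %s%s\n", "rbd_header.", "1234abcd");
	printf("v2 id obj: %s%s\n", "rbd_id.", "foo");
	return 0;
}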
6915
b9ef2b88
ID
6916static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6917{
6918 if (!is_snap) {
6919 pr_info("image %s/%s%s%s does not exist\n",
6920 rbd_dev->spec->pool_name,
6921 rbd_dev->spec->pool_ns ?: "",
6922 rbd_dev->spec->pool_ns ? "/" : "",
6923 rbd_dev->spec->image_name);
6924 } else {
6925 pr_info("snap %s/%s%s%s@%s does not exist\n",
6926 rbd_dev->spec->pool_name,
6927 rbd_dev->spec->pool_ns ?: "",
6928 rbd_dev->spec->pool_ns ? "/" : "",
6929 rbd_dev->spec->image_name,
6930 rbd_dev->spec->snap_name);
6931 }
6932}
6933
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	if (!rbd_is_ro(rbd_dev))
		rbd_unregister_watch(rbd_dev);

	rbd_dev_unprobe(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 *
 * On success, returns with header_rwsem held for write if called
 * with @depth == 0.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	bool need_watch = !rbd_is_ro(rbd_dev);
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (need_watch) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				rbd_print_dne(rbd_dev, false);
			goto err_out_format;
		}
	}

	if (!depth)
		down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_header_info(rbd_dev);
	if (ret) {
		if (ret == -ENOENT && !need_watch)
			rbd_print_dne(rbd_dev, false);
		goto err_out_probe;
	}

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			rbd_print_dne(rbd_dev, true);
		goto err_out_probe;
	}

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_probe;

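	/*
	 * A mapped snapshot is immutable, so if the image has the
	 * object-map feature its object map can be loaded once here
	 * rather than being maintained at I/O time.
	 */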
	if (rbd_is_snap(rbd_dev) &&
	    (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
		ret = rbd_object_map_load(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

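	/*
	 * If parent info was found above, probe the rest of the clone
	 * chain as well; each parent image is probed one level deeper
	 * (depth + 1) and treated as read-only.
	 */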
	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
	     rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	if (!depth)
		up_write(&rbd_dev->header_rwsem);
	if (need_watch)
		rbd_unregister_watch(rbd_dev);
	rbd_dev_unprobe(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

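/*
 * Handle a mapping spec written to the sysfs "add" (or
 * "add_single_major") attribute: parse the arguments, connect to the
 * cluster, resolve the pool, probe the image, set up the block device,
 * acquire the exclusive lock if needed and announce the disk.
 */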
static ssize_t do_rbd_add(const char *buf, size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	/* if we are mapping a snapshot it will be a read-only mapping */
	if (rbd_dev->opts->read_only ||
	    strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
		__set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

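	/*
	 * A successful probe returns with header_rwsem held for write;
	 * rbd_dev_device_setup() below drops it on return.
	 */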
	if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
		rbd_warn(rbd_dev, "alloc_size adjusted to %u",
			 rbd_dev->layout.object_size);
		rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
	}

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	rc = rbd_add_acquire_lock(rbd_dev);
	if (rc)
		goto err_out_image_lock;

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
	if (rc)
		goto err_out_cleanup_disk;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

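/*
 * Error unwind: each label below undoes one setup step, in reverse
 * order of the success path above.
 */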
err_out_cleanup_disk:
	rbd_free_disk(rbd_dev);
err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

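/*
 * The plain "add"/"remove" bus attributes are only valid when each
 * image gets its own major; with single_major the "*_single_major"
 * variants must be used instead.
 */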
static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(buf, count);
}

static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
				      size_t count)
{
	return do_rbd_add(buf, count);
}

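/*
 * Dismantle the clone chain bottom-up: on each pass walk to the
 * deepest parent (the one with no grandparent), release and destroy
 * it, and detach it from its child, until no parents remain.
 */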
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

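/*
 * Handle "<dev-id> [force]" written to the sysfs "remove" (or
 * "remove_single_major") attribute.  Without "force" a mapping that is
 * still open is refused with -EBUSY; with it, outstanding I/O is
 * failed and the device is torn down anyway.
 */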
static ssize_t do_rbd_remove(const char *buf, size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int dev_id;
	char opt_buf[6];
	bool force = false;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
					  &rbd_dev->flags))
			ret = -EINPROGRESS;
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_mark_disk_dead(rbd_dev->disk);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}

static ssize_t remove_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(buf, count);
}

static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
					 size_t count)
{
	return do_rbd_remove(buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
	int ret;

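	/*
	 * Note: a failed device_register() still leaves a reference on
	 * the device, which must be dropped with put_device() rather
	 * than device_unregister().
	 */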
	ret = device_register(&rbd_root_dev);
	if (ret < 0) {
		put_device(&rbd_root_dev);
		return ret;
	}

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

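/*
 * Create the slab caches backing image and object request allocations;
 * on return either both caches exist or neither does.
 */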
static int __init rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

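/*
 * Module init: check libceph compatibility, then set up the slab
 * caches, the I/O workqueue, the shared block major (in single_major
 * mode) and finally the sysfs interface, unwinding in reverse order
 * on failure.
 */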
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");