/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
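
/*
 * Behavior sketch (illustrative note, not part of the original source):
 * a counter sitting at 0 stays at 0 and atomic_inc_return_safe() returns
 * 0, so a reference count that has been shut off cannot be revived; once
 * an increment would push the value past INT_MAX, the increment is undone
 * and -EINVAL is returned.  This pair is used below for parent_ref
 * accounting.
 */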

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
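
/*
 * Illustrative arithmetic (not in the original source): NAME_MAX is 255
 * and sizeof ("snap_") - 1 is 5, so RBD_MAX_SNAP_NAME_LEN works out to
 * 250 bytes.  Similarly, 510 snapshot ids at 8 bytes each (4080 bytes)
 * leave room for the snap context header within a single 4KB page.
 */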

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX	/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
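
/*
 * Usage sketch (illustrative only): individual features are tested by
 * masking, e.g.
 *
 *	if (rbd_dev->header.features & RBD_FEATURE_LAYERING)
 *		... image may have a parent ...
 *
 * An image whose feature mask includes bits outside
 * RBD_FEATURES_SUPPORTED is refused by this client at mapping time.
 */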

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};

#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)

enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *            . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
 *            .                 |                                    .
 *            .                 v                                    .
 *            .    RBD_OBJ_WRITE_READ_FROM_PARENT. . .               .
 *            .                 |                    .               .
 *            .                 v                    v (deep-copyup  .
 *    (image  .   RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC   .  not needed)  .
 *  flattened)v                 |                    .               .
 *            .                 v                    .               .
 *            . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . .      (copyup  .
 *                              |                        not needed) v
 *                              v                                    .
 *                            done . . . . . . . . . . . . . . . . . .
 *                              ^
 *                              |
 *                     RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};

enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};

struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)

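/*
 * Usage sketch (illustrative only): the _safe variant must be used when
 * object requests may be removed while walking the list, e.g.
 *
 *	struct rbd_obj_request *obj_req, *next_obj_req;
 *
 *	for_each_obj_request_safe(img_req, obj_req, next_obj_req)
 *		rbd_img_obj_request_del(img_req, obj_req);
 */
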
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64                     size;
	u64                     features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	int			acquire_err;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

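/*
 * Worked example (illustrative only): with RBD_SINGLE_MAJOR_PART_SHIFT
 * of 4, dev_id 3 maps to minor 48, and minors 48..63 (16 per device)
 * cover /dev/rbd3 and its partitions.
 */
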
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_features);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);

/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}

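/*
 * Usage sketch (illustrative only): a parent request sets
 * pending.num_pending to the number of children it issues; each child
 * completion then does
 *
 *	if (pending_result_dec(&req->pending, &result))
 *		... all children done, continue with "result" ...
 *
 * The first nonzero child result is the one reported.
 */
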
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_alloc_size, "alloc_size=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};

static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_alloc_size:
		if (intval < SECTOR_SIZE) {
			pr_err("alloc_size out of range\n");
			return -EINVAL;
		}
		if (!is_power_of_2(intval)) {
			pr_err("alloc_size must be a power of 2\n");
			return -EINVAL;
		}
		pctx->opts->alloc_size = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

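/*
 * Example (illustrative only): a map option string like the one parsed
 * above is typically supplied from userspace, e.g.
 *
 *	rbd map myimage -o queue_depth=256,lock_on_read,notrim
 *
 * Int options are range-checked here; unknown options are rejected with
 * -EINVAL (libceph prints the "bad option" message).
 */
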
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client.  Takes rbd_client_list_lock to remove the
 * client from the client list, so the caller must not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

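/*
 * Worked example (illustrative only): the common default obj_order of 22
 * yields 1U << 22 = 4 MiB objects, so a 1 GiB image spans 256 objects.
 */
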
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

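/*
 * Layout sketch (illustrative only): snap_names packs NUL-terminated
 * strings back to back, so for snapshots "one" and "two" the buffer is
 *
 *	'o' 'n' 'e' '\0' 't' 'w' 'o' '\0'
 *
 * and which == 1 skips strlen("one") + 1 bytes to land on "two".
 */
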
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

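/*
 * Worked example (illustrative only): for a snap context holding ids
 * { 18, 12, 5 } (descending, newest first), looking up id 12 returns
 * index 1, while looking up id 7 returns BAD_SNAP_INDEX.
 */
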
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		BUG();
	}
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}

static void rbd_osd_submit(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
	     __func__, osd_req, obj_req, obj_req->ex.oe_objno,
	     obj_req->ex.oe_off, obj_req->ex.oe_len);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
{
	if (!obj_req->num_img_extents ||
	    (rbd_obj_is_entire(obj_req) &&
	     !obj_req->img_request->snapc->num_snaps))
		return false;

	return true;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	/*
	 * Writes aren't allowed to return a data payload.  In some
	 * guarded write cases (e.g. stat + zero on an empty object)
	 * a stat response makes it through, but we don't care.
	 */
	if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
		result = 0;
	else
		result = osd_req->r_result;

	rbd_obj_handle_request(obj_req, result);
}

static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}

static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
			  struct ceph_snap_context *snapc, int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
	int ret;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			       rbd_dev->header.object_prefix,
			       obj_req->ex.oe_objno);
	if (ret)
		return ERR_PTR(ret);

	return req;
}

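/*
 * Naming sketch (illustrative only; the format strings live in
 * rbd_types.h): for a format 2 image with a hypothetical object_prefix
 * of "rbd_data.102a6b8b4567", object number 1 would name the OSD object
 * "rbd_data.102a6b8b4567.0000000000000001" (objno rendered as %016llx).
 */
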
static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
					 num_ops);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	INIT_LIST_HEAD(&obj_request->osd_reqs);
	mutex_init(&obj_request->state_mutex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_request *osd_req;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	while (!list_empty(&obj_request->osd_reqs)) {
		osd_req = list_first_entry(&obj_request->osd_reqs,
				    struct ceph_osd_request, r_private_item);
		list_del_init(&osd_req->r_private_item);
		ceph_osdc_put_request(osd_req);
	}

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		BUG();
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

1700/*
1701 * If an image has a non-zero parent overlap, get a reference to its
1702 * parent.
1703 *
1704 * Returns true if the rbd device has a parent with a non-zero
1705 * overlap and a reference for it was successfully taken, or
1706 * false otherwise.
1707 */
1708static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1709{
ae43e9d0 1710 int counter = 0;
a2acd00e
AE
1711
1712 if (!rbd_dev->parent_spec)
1713 return false;
1714
ae43e9d0
ID
1715 down_read(&rbd_dev->header_rwsem);
1716 if (rbd_dev->parent_overlap)
1717 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1718 up_read(&rbd_dev->header_rwsem);
a2acd00e
AE
1719
1720 if (counter < 0)
9584d508 1721 rbd_warn(rbd_dev, "parent reference overflow");
a2acd00e 1722
ae43e9d0 1723 return counter > 0;
a2acd00e
AE
1724}
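/*
 * Annotation (not in the original source): these two helpers pair up
 * across an image request's lifetime -- rbd_img_request_create() below
 * takes the parent reference (marking the request layered) and
 * rbd_img_request_destroy() drops it, so the parent spec and overlap
 * are only torn down once the last layered request has completed.
 */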

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	INIT_LIST_HEAD(&img_request->lock_item);
	INIT_LIST_HEAD(&img_request->object_extents);
	mutex_init(&img_request->state_mutex);
	kref_init(&img_request->kref);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	WARN_ON(!list_empty(&img_request->lock_item));
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

#define BITS_PER_OBJ	2
#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)

static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
				   u64 *index, u8 *shift)
{
	u32 off;

	rbd_assert(objno < rbd_dev->object_map_size);
	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}
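/*
 * Worked example (annotation, not in the original source): with
 * BITS_PER_OBJ == 2 there are four object states per byte, most
 * significant bits first.  objno 5 gives index = 5 / 4 = 1 and
 * off = 5 % 4 = 1, so shift = (4 - 1 - 1) * 2 = 4 -- the state of
 * object 5 lives in bits 5:4 of byte 1 of the map.
 */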

static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u64 index;
	u8 shift;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}

static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
	u64 index;
	u8 shift;
	u8 *p;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	rbd_assert(!(val & ~OBJ_MASK));

	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	p = &rbd_dev->object_map[index];
	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}

static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	spin_unlock(&rbd_dev->object_map_lock);
	return state;
}

static bool use_object_map(struct rbd_device *rbd_dev)
{
	return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
		!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
}

static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	/* fall back to default logic if object map is disabled or invalid */
	if (!use_object_map(rbd_dev))
		return true;

	state = rbd_object_map_get(rbd_dev, objno);
	return state != OBJECT_NONEXISTENT;
}

static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
				struct ceph_object_id *oid)
{
	if (snap_id == CEPH_NOSNAP)
		ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id);
	else
		ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id, snap_id);
}

static int rbd_object_map_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	u8 lock_type;
	char *lock_tag;
	struct ceph_locker *lockers;
	u32 num_lockers;
	bool broke_lock = false;
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

again:
	ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			    CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
	if (ret != -EBUSY || broke_lock) {
		if (ret == -EEXIST)
			ret = 0; /* already locked by myself */
		if (ret)
			rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
		return ret;
	}

	ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
				 RBD_LOCK_NAME, &lock_type, &lock_tag,
				 &lockers, &num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
		return ret;
	}

	kfree(lock_tag);
	if (num_lockers == 0)
		goto again;

	rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
		 ENTITY_NAME(lockers[0].id.name));

	ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
				  RBD_LOCK_NAME, lockers[0].id.cookie,
				  &lockers[0].id.name);
	ceph_free_lockers(lockers, num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
		return ret;
	}

	broke_lock = true;
	goto again;
}

static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

	ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			      "");
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
}

static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
{
	u8 struct_v;
	u32 struct_len;
	u32 header_len;
	void *header_end;
	int ret;

	ceph_decode_32_safe(p, end, header_len, e_inval);
	header_end = *p + header_len;

	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, *object_map_size, e_inval);

	*p = header_end;
	return 0;

e_inval:
	return -EINVAL;
}
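/*
 * Annotation (not in the original source): the framing implied by the
 * decoder above is
 *
 *	le32 header_len;		bytes up to the per-object data
 *	u8 struct_v;			"BitVector header", currently v1
 *	le32 struct_len;
 *	le64 object_map_size;		number of objects covered
 *	...				(anything else is skipped via
 *					 header_end)
 *
 * followed by the packed 2-bit object states.
 */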

static int __rbd_object_map_load(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	struct page **pages;
	void *p, *end;
	size_t reply_len;
	u64 num_objects;
	u64 object_map_bytes;
	u64 object_map_size;
	int num_pages;
	int ret;

	rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);

	num_objects = ceph_get_num_objects(&rbd_dev->layout,
					   rbd_dev->mapping.size);
	object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
					    BITS_PER_BYTE);
	num_pages = calc_pages_for(0, object_map_bytes) + 1;
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	reply_len = num_pages * PAGE_SIZE;
	rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
	ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
			     "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
			     NULL, 0, pages, &reply_len);
	if (ret)
		goto out;

	p = page_address(pages[0]);
	end = p + min(reply_len, (size_t)PAGE_SIZE);
	ret = decode_object_map_header(&p, end, &object_map_size);
	if (ret)
		goto out;

	if (object_map_size != num_objects) {
		rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
			 object_map_size, num_objects);
		ret = -EINVAL;
		goto out;
	}

	if (offset_in_page(p) + object_map_bytes > reply_len) {
		ret = -EINVAL;
		goto out;
	}

	rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
	if (!rbd_dev->object_map) {
		ret = -ENOMEM;
		goto out;
	}

	rbd_dev->object_map_size = object_map_size;
	ceph_copy_from_page_vector(pages, rbd_dev->object_map,
				   offset_in_page(p), object_map_bytes);

out:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}

static void rbd_object_map_free(struct rbd_device *rbd_dev)
{
	kvfree(rbd_dev->object_map);
	rbd_dev->object_map = NULL;
	rbd_dev->object_map_size = 0;
}

static int rbd_object_map_load(struct rbd_device *rbd_dev)
{
	int ret;

	ret = __rbd_object_map_load(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_v2_get_flags(rbd_dev);
	if (ret) {
		rbd_object_map_free(rbd_dev);
		return ret;
	}

	if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
		rbd_warn(rbd_dev, "object map is invalid");

	return 0;
}

static int rbd_object_map_open(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_object_map_lock(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_object_map_load(rbd_dev);
	if (ret) {
		rbd_object_map_unlock(rbd_dev);
		return ret;
	}

	return 0;
}

static void rbd_object_map_close(struct rbd_device *rbd_dev)
{
	rbd_object_map_free(rbd_dev);
	rbd_object_map_unlock(rbd_dev);
}

/*
 * This function needs snap_id (or more precisely just something to
 * distinguish between HEAD and snapshot object maps), new_state and
 * current_state that were passed to rbd_object_map_update().
 *
 * To avoid allocating and stashing a context we piggyback on the OSD
 * request.  A HEAD update has two ops (assert_locked).  For new_state
 * and current_state we decode our own object_map_update op, encoded in
 * rbd_cls_object_map_update().
 */
static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
					struct ceph_osd_request *osd_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_data *osd_data;
	u64 objno;
	u8 state, new_state, current_state;
	bool has_current_state;
	void *p;

	if (osd_req->r_result)
		return osd_req->r_result;

	/*
	 * Nothing to do for a snapshot object map.
	 */
	if (osd_req->r_num_ops == 1)
		return 0;

	/*
	 * Update in-memory HEAD object map.
	 */
	rbd_assert(osd_req->r_num_ops == 2);
	osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
	rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);

	p = page_address(osd_data->pages[0]);
	objno = ceph_decode_64(&p);
	rbd_assert(objno == obj_req->ex.oe_objno);
	rbd_assert(ceph_decode_64(&p) == objno + 1);
	new_state = ceph_decode_8(&p);
	has_current_state = ceph_decode_8(&p);
	if (has_current_state)
		current_state = ceph_decode_8(&p);

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	if (!has_current_state || current_state == state ||
	    (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
		__rbd_object_map_set(rbd_dev, objno, new_state);
	spin_unlock(&rbd_dev->object_map_lock);

	return 0;
}

static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	result = rbd_object_map_update_finish(obj_req, osd_req);
	rbd_obj_handle_request(obj_req, result);
}

static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
{
	u8 state = rbd_object_map_get(rbd_dev, objno);

	if (state == new_state ||
	    (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
	    (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
		return false;

	return true;
}
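/*
 * Annotation (not in the original source): the conditions above skip
 * updates that would be no-ops -- new_state == state, marking a
 * NONEXISTENT object as PENDING (there is nothing to delete) and
 * marking anything that isn't PENDING as NONEXISTENT (only an object
 * whose deletion was staged as PENDING is dropped from the map).
 */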

static int rbd_cls_object_map_update(struct ceph_osd_request *req,
				     int which, u64 objno, u8 new_state,
				     const u8 *current_state)
{
	struct page **pages;
	void *p, *start;
	int ret;

	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
	if (ret)
		return ret;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	p = start = page_address(pages[0]);
	ceph_encode_64(&p, objno);
	ceph_encode_64(&p, objno + 1);
	ceph_encode_8(&p, new_state);
	if (current_state) {
		ceph_encode_8(&p, 1);
		ceph_encode_8(&p, *current_state);
	} else {
		ceph_encode_8(&p, 0);
	}

	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
					  false, true);
	return 0;
}
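/*
 * Annotation (not in the original source): the request data encoded
 * above is a half-open object range plus the desired transition.  E.g.
 * removing object 7 whose deletion was staged as PENDING produces
 *
 *	le64 7			range start
 *	le64 8			range end (exclusive)
 *	u8 OBJECT_NONEXISTENT	new_state
 *	u8 1			current_state present
 *	u8 OBJECT_PENDING	expected current state
 *
 * rbd_object_map_update_finish() decodes this same buffer on completion.
 */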

/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
				 u8 new_state, const u8 *current_state)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	int num_ops = 1;
	int which = 0;
	int ret;

	if (snap_id == CEPH_NOSNAP) {
		if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
			return 1;

		num_ops++; /* assert_locked */
	}

	req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_object_map_callback;
	req->r_priv = obj_req;

	rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);

	if (snap_id == CEPH_NOSNAP) {
		/*
		 * Protect against possible race conditions during lock
		 * ownership transitions.
		 */
		ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
					     CEPH_CLS_LOCK_EXCLUSIVE, "", "");
		if (ret)
			return ret;
	}

	ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
					new_state, current_state);
	if (ret)
		return ret;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		return ret;

	ceph_osdc_start_request(osdc, req, false);
	return 0;
}

static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
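/*
 * Worked example (annotation, not in the original source): with
 * overlap == 8192 and extents {0, 4096}, {4096, 6144}, {16384, 4096},
 * the last extent starts beyond the overlap and is dropped, the second
 * is trimmed from 6144 to 4096 bytes so that it ends at the overlap,
 * and *num_img_extents becomes 2.
 */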

/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}

static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		BUG();
	}
}

static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}

static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
				u32 bytes)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int ret;

	ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
	if (ret)
		return ret;

	osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count, bytes);
	return 0;
}

static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
{
	obj_req->read_state = RBD_OBJ_READ_START;
	return 0;
}

static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				      int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	if (!use_object_map(rbd_dev) ||
	    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
		osd_req_op_alloc_hint_init(osd_req, which++,
					   rbd_dev->layout.object_size,
					   rbd_dev->layout.object_size);
	}

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, which);
}

static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}

static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}

static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
		osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
	} else {
		osd_req_op_extent_init(osd_req, which,
				       truncate_or_zero_opcode(obj_req),
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
	}
}

static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off, next_off;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
		next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
				      rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;

		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		obj_req->ex.oe_off = off;
		obj_req->ex.oe_len = next_off - off;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
		obj_req->flags |= RBD_OBJ_FLAG_DELETION;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
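/*
 * Worked example (annotation, not in the original source): with
 * alloc_size == 65536, a discard of oe_off == 10000, oe_len == 150000
 * is rounded in to off == 65536 and next_off == 131072 -- only the one
 * fully covered 64k block is discarded.  A discard that lies entirely
 * within a single block yields off >= next_off and is dropped by
 * returning 1 (the caller deletes the object request).
 */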

static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
				osd_req_op_init(osd_req, which++,
						CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
			osd_req_op_init(osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else {
		opcode = truncate_or_zero_opcode(obj_req);
	}

	if (opcode)
		osd_req_op_extent_init(osd_req, which, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
}

static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
	if (!obj_req->num_img_extents) {
		obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
		if (rbd_obj_is_entire(obj_req))
			obj_req->flags |= RBD_OBJ_FLAG_DELETION;
	}

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}

static int count_write_ops(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;

	switch (img_req->op_type) {
	case OBJ_OP_WRITE:
		if (!use_object_map(img_req->rbd_dev) ||
		    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
			return 2; /* setallochint + write/writefull */

		return 1; /* write/writefull */
	case OBJ_OP_DISCARD:
		return 1; /* delete/truncate/zero */
	case OBJ_OP_ZEROOUT:
		if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
		    !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
			return 2; /* create + truncate */

		return 1; /* delete/truncate/zero */
	default:
		BUG();
	}
}
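/*
 * Annotation (not in the original source): these counts must match the
 * ops that rbd_osd_setup_write_ops() emits for the same object request.
 * E.g. a plain write to an object that may already exist under a valid
 * object map is a single write/writefull op, while a write to a
 * possibly nonexistent object is setallochint + write/writefull.
 * rbd_obj_write_object() additionally prepends a stat op on top of this
 * count when copyup is enabled.
 */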

static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				    int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->op_type) {
	case OBJ_OP_WRITE:
		__rbd_osd_setup_write_ops(osd_req, which);
		break;
	case OBJ_OP_DISCARD:
		__rbd_osd_setup_discard_ops(osd_req, which);
		break;
	case OBJ_OP_ZEROOUT:
		__rbd_osd_setup_zeroout_ops(osd_req, which);
		break;
	default:
		BUG();
	}
}

/*
 * Prune the list of object requests (adjust offset and/or length, drop
 * redundant requests).  Prepare object request state machines and image
 * request state machine for execution.
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req, *next_obj_req;
	int ret;

	for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_init_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_init_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_init_discard(obj_req);
			break;
		case OBJ_OP_ZEROOUT:
			ret = rbd_obj_init_zeroout(obj_req);
			break;
		default:
			BUG();
		}
		if (ret < 0)
			return ret;
		if (ret > 0) {
			rbd_img_obj_request_del(img_req, obj_req);
			continue;
		}
	}

	img_req->state = RBD_IMG_START;
	return 0;
}

union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};

static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}

/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}
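/*
 * Annotation (not in the original source): su/os/sc above are the
 * layout's stripe_unit, object_size and stripe_count.  E.g. the default
 * layout su == os == 4M, sc == 1 is not fancy and takes the nocopy path
 * in rbd_img_fill_request(), while su == 64K, os == 4M is fancy and
 * forces OBJ_REQUEST_OWN_BVECS, where bio_vecs are first counted and
 * then copied per stripe unit.
 */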

static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy;
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}

static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}

static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}

static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}

static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				     struct ceph_file_extent *img_extents,
				     u32 num_img_extents,
				     struct ceph_bvec_iter *bvec_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BVECS,
		.pos = (union rbd_img_fill_iter *)bvec_pos,
		.set_pos_fn = set_bvec_pos,
		.count_fn = count_bvecs,
		.copy_fn = copy_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct bio_vec *bvecs)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
							     num_img_extents) },
	};

	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
					 &it);
}
static void rbd_img_handle_request_work(struct work_struct *work)
{
	struct rbd_img_request *img_req =
	    container_of(work, struct rbd_img_request, work);

	rbd_img_handle_request(img_req, img_req->work_result);
}

static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
{
	INIT_WORK(&img_req->work, rbd_img_handle_request_work);
	img_req->work_result = result;
	queue_work(rbd_wq, &img_req->work);
}

static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
		return true;
	}

	dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
	     obj_req->ex.oe_objno);
	return false;
}
static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
{
	struct ceph_osd_request *osd_req;
	int ret;

	osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, 0);
	rbd_osd_format_read(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}

static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
					       OBJ_OP_READ, NULL);
	if (!child_img_req)
		return -ENOMEM;

	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
	     obj_req);

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			BUG();
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_put(child_img_req);
		return ret;
	}

	/* avoid parent chain recursion */
	rbd_img_schedule(child_img_req, 0);
	return 0;
}

static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

again:
	switch (obj_req->read_state) {
	case RBD_OBJ_READ_START:
		rbd_assert(!*result);

		if (!rbd_obj_may_exist(obj_req)) {
			*result = -ENOENT;
			obj_req->read_state = RBD_OBJ_READ_OBJECT;
			goto again;
		}

		ret = rbd_obj_read_object(obj_req);
		if (ret) {
			*result = ret;
			return true;
		}
		obj_req->read_state = RBD_OBJ_READ_OBJECT;
		return false;
	case RBD_OBJ_READ_OBJECT:
		if (*result == -ENOENT && rbd_dev->parent_overlap) {
			/* reverse map this object extent onto the parent */
			ret = rbd_obj_calc_img_extents(obj_req, false);
			if (ret) {
				*result = ret;
				return true;
			}
			if (obj_req->num_img_extents) {
				ret = rbd_obj_read_from_parent(obj_req);
				if (ret) {
					*result = ret;
					return true;
				}
				obj_req->read_state = RBD_OBJ_READ_PARENT;
				return false;
			}
		}

		/*
		 * -ENOENT means a hole in the image -- zero-fill the entire
		 * length of the request.  A short read also implies zero-fill
		 * to the end of the request.
		 */
		if (*result == -ENOENT) {
			rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
			*result = 0;
		} else if (*result >= 0) {
			if (*result < obj_req->ex.oe_len)
				rbd_obj_zero_range(obj_req, *result,
					    obj_req->ex.oe_len - *result);
			else
				rbd_assert(*result == obj_req->ex.oe_len);
			*result = 0;
		}
		return true;
	case RBD_OBJ_READ_PARENT:
		/*
		 * The parent image is read only up to the overlap -- zero-fill
		 * from the overlap to the end of the request.
		 */
		if (!*result) {
			u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);

			if (obj_overlap < obj_req->ex.oe_len)
				rbd_obj_zero_range(obj_req, obj_overlap,
					    obj_req->ex.oe_len - obj_overlap);
		}
		return true;
	default:
		BUG();
	}
}
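/*
 * Annotation (not in the original source), the read path in brief:
 * RBD_OBJ_READ_START issues the OSD read, or synthesizes -ENOENT right
 * away if the object map says the object doesn't exist;
 * RBD_OBJ_READ_OBJECT either zero-fills holes and short reads and
 * completes, or redirects -ENOENT to the parent image;
 * RBD_OBJ_READ_PARENT zero-fills past the overlap and completes.
 */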

static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;

	if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
	    (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
		dout("%s %p noop for nonexistent\n", __func__, obj_req);
		return true;
	}

	return false;
}

/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u8 new_state;

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return 1;

	if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
		new_state = OBJECT_PENDING;
	else
		new_state = OBJECT_EXISTS;

	return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
}

static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
{
	struct ceph_osd_request *osd_req;
	int num_ops = count_write_ops(obj_req);
	int which = 0;
	int ret;

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
		num_ops++; /* stat */

	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
		ret = rbd_osd_setup_stat(osd_req, which++);
		if (ret)
			return ret;
	}

	rbd_osd_setup_write_ops(osd_req, which);
	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}

/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}

#define MODS_ONLY	U32_MAX

static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
				      u32 bytes)
{
	struct ceph_osd_request *osd_req;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(bytes > 0 && bytes != MODS_ONLY);

	osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
	if (ret)
		return ret;

	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}

static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
					u32 bytes)
{
	struct ceph_osd_request *osd_req;
	int num_ops = count_write_ops(obj_req);
	int which = 0;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);

	if (bytes != MODS_ONLY)
		num_ops++; /* copyup */

	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
	if (IS_ERR(osd_req))
		return PTR_ERR(osd_req);

	if (bytes != MODS_ONLY) {
		ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
		if (ret)
			return ret;
	}

	rbd_osd_setup_write_ops(osd_req, which);
	rbd_osd_format_write(osd_req);

	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_osd_submit(osd_req);
	return 0;
}

static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);

		obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (!obj_req->copyup_bvecs[i].bv_page)
			return -ENOMEM;

		obj_req->copyup_bvecs[i].bv_offset = 0;
		obj_req->copyup_bvecs[i].bv_len = len;
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}
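/*
 * Worked example (annotation, not in the original source): with 4k
 * pages, obj_overlap == 10000 gives calc_pages_for(0, 10000) == 3
 * bvecs of 4096, 4096 and 1808 bytes -- successive
 * min(obj_overlap, PAGE_SIZE) slices that consume the overlap exactly,
 * satisfying the final assertion.
 */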

/*
 * The target object doesn't exist.  Read the data for the entire
 * target object up to the overlap point (if any) from the parent,
 * so we can use it for a copyup.
 */
static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	rbd_assert(obj_req->num_img_extents);
	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	if (!obj_req->num_img_extents) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Re-submit the original write
		 * request -- pass MODS_ONLY since the copyup isn't needed
		 * anymore.
		 */
		return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
	}

	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
	if (ret)
		return ret;

	return rbd_obj_read_from_parent(obj_req);
}

static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_snap_context *snapc = obj_req->img_request->snapc;
	u8 new_state;
	u32 i;
	int ret;

	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return;

	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
		return;

	for (i = 0; i < snapc->num_snaps; i++) {
		if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
		    i + 1 < snapc->num_snaps)
			new_state = OBJECT_EXISTS_CLEAN;
		else
			new_state = OBJECT_EXISTS;

		ret = rbd_object_map_update(obj_req, snapc->snaps[i],
					    new_state, NULL);
		if (ret < 0) {
			obj_req->pending.result = ret;
			return;
		}

		rbd_assert(!ret);
		obj_req->pending.num_pending++;
	}
}

static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
{
	u32 bytes = rbd_obj_img_extents_bytes(obj_req);
	int ret;

	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
		bytes = 0;

	if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
		/*
		 * Send a copyup request with an empty snapshot context to
		 * deep-copyup the object through all existing snapshots.
		 * A second request with the current snapshot context will be
		 * sent for the actual modification.
		 */
		ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
		if (ret) {
			obj_req->pending.result = ret;
			return;
		}

		obj_req->pending.num_pending++;
		bytes = MODS_ONLY;
	}

	ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
	if (ret) {
		obj_req->pending.result = ret;
		return;
	}

	obj_req->pending.num_pending++;
}

static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

again:
	switch (obj_req->copyup_state) {
	case RBD_OBJ_COPYUP_START:
		rbd_assert(!*result);

		ret = rbd_obj_copyup_read_parent(obj_req);
		if (ret) {
			*result = ret;
			return true;
		}
		if (obj_req->num_img_extents)
			obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
		else
			obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
		return false;
	case RBD_OBJ_COPYUP_READ_PARENT:
		if (*result)
			return true;

		if (is_zero_bvecs(obj_req->copyup_bvecs,
				  rbd_obj_img_extents_bytes(obj_req))) {
			dout("%s %p detected zeros\n", __func__, obj_req);
			obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
		}

		rbd_obj_copyup_object_maps(obj_req);
		if (!obj_req->pending.num_pending) {
			*result = obj_req->pending.result;
			obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
			goto again;
		}
		obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
		return false;
	case __RBD_OBJ_COPYUP_OBJECT_MAPS:
		if (!pending_result_dec(&obj_req->pending, result))
			return false;
		/* fall through */
	case RBD_OBJ_COPYUP_OBJECT_MAPS:
		if (*result) {
			rbd_warn(rbd_dev, "snap object map update failed: %d",
				 *result);
			return true;
		}

		rbd_obj_copyup_write_object(obj_req);
		if (!obj_req->pending.num_pending) {
			*result = obj_req->pending.result;
			obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
			goto again;
		}
		obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
		return false;
	case __RBD_OBJ_COPYUP_WRITE_OBJECT:
		if (!pending_result_dec(&obj_req->pending, result))
			return false;
		/* fall through */
	case RBD_OBJ_COPYUP_WRITE_OBJECT:
		return true;
	default:
		BUG();
	}
}

/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u8 current_state = OBJECT_PENDING;

	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return 1;

	if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
		return 1;

	return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
				     &current_state);
}

static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

again:
	switch (obj_req->write_state) {
	case RBD_OBJ_WRITE_START:
		rbd_assert(!*result);

		if (rbd_obj_write_is_noop(obj_req))
			return true;

		ret = rbd_obj_write_pre_object_map(obj_req);
		if (ret < 0) {
			*result = ret;
			return true;
		}
		obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
		if (ret > 0)
			goto again;
		return false;
	case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
		if (*result) {
			rbd_warn(rbd_dev, "pre object map update failed: %d",
				 *result);
			return true;
		}
		ret = rbd_obj_write_object(obj_req);
		if (ret) {
			*result = ret;
			return true;
		}
		obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
		return false;
	case RBD_OBJ_WRITE_OBJECT:
		if (*result == -ENOENT) {
			if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
				*result = 0;
				obj_req->copyup_state = RBD_OBJ_COPYUP_START;
				obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
				goto again;
			}
			/*
			 * On a non-existent object:
			 *   delete - -ENOENT, truncate/zero - 0
			 */
			if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
				*result = 0;
		}
		if (*result)
			return true;

		obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
		goto again;
	case __RBD_OBJ_WRITE_COPYUP:
		if (!rbd_obj_advance_copyup(obj_req, result))
			return false;
		/* fall through */
	case RBD_OBJ_WRITE_COPYUP:
		if (*result) {
			rbd_warn(rbd_dev, "copyup failed: %d", *result);
			return true;
		}
		ret = rbd_obj_write_post_object_map(obj_req);
		if (ret < 0) {
			*result = ret;
			return true;
		}
		obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
		if (ret > 0)
			goto again;
		return false;
	case RBD_OBJ_WRITE_POST_OBJECT_MAP:
		if (*result)
			rbd_warn(rbd_dev, "post object map update failed: %d",
				 *result);
		return true;
	default:
		BUG();
	}
}
02c74fba 3514
/*
 * Return true if @obj_req is completed.
 */
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
				     int *result)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	bool done;

	mutex_lock(&obj_req->state_mutex);
	if (!rbd_img_is_write(img_req))
		done = rbd_obj_advance_read(obj_req, result);
	else
		done = rbd_obj_advance_write(obj_req, result);
	mutex_unlock(&obj_req->state_mutex);

	if (done && *result) {
		rbd_assert(*result < 0);
		rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
			 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
			 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
	}
	return done;
}

/*
 * This is open-coded in rbd_img_handle_request() to avoid parent chain
 * recursion.
 */
static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
{
	if (__rbd_obj_handle_request(obj_req, &result))
		rbd_img_handle_request(obj_req->img_request, result);
}

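/*
 * Writes (and reads, if lock_on_read or the object map feature is
 * enabled) require the exclusive lock.  Snapshot mappings never do.
 */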
static bool need_exclusive_lock(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;

	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
		return false;

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		return false;

	rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
	if (rbd_dev->opts->lock_on_read ||
	    (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
		return true;

	return rbd_img_is_write(img_req);
}

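/*
 * Add @img_req to the acquiring or running list depending on whether
 * the exclusive lock is currently held.  Return true if the request
 * can proceed (i.e. the lock is held).
 */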
static bool rbd_lock_add_request(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	bool locked;

	lockdep_assert_held(&rbd_dev->lock_rwsem);
	locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
	spin_lock(&rbd_dev->lock_lists_lock);
	rbd_assert(list_empty(&img_req->lock_item));
	if (!locked)
		list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
	else
		list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
	spin_unlock(&rbd_dev->lock_lists_lock);
	return locked;
}

static void rbd_lock_del_request(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	bool need_wakeup;

	lockdep_assert_held(&rbd_dev->lock_rwsem);
	spin_lock(&rbd_dev->lock_lists_lock);
	rbd_assert(!list_empty(&img_req->lock_item));
	list_del_init(&img_req->lock_item);
	need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
		       list_empty(&rbd_dev->running_list));
	spin_unlock(&rbd_dev->lock_lists_lock);
	if (need_wakeup)
		complete(&rbd_dev->releasing_wait);
}

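/*
 * Return:
 *   1 - caller may proceed (lock not needed or already held)
 *   0 - lock acquisition queued, request will be rescheduled
 *  <0 - error
 */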
static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;

	if (!need_exclusive_lock(img_req))
		return 1;

	if (rbd_lock_add_request(img_req))
		return 1;

	if (rbd_dev->opts->exclusive) {
		WARN_ON(1); /* lock got released? */
		return -EROFS;
	}

	/*
	 * Note the use of mod_delayed_work() in rbd_acquire_lock()
	 * and cancel_delayed_work() in wake_lock_waiters().
	 */
	dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
	queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	return 0;
}

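/*
 * Kick off all object requests, counting those that remain pending.
 * Stop submitting on the first immediate error.
 */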
static void rbd_img_object_requests(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req;

	rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);

	for_each_obj_request(img_req, obj_req) {
		int result = 0;

		if (__rbd_obj_handle_request(obj_req, &result)) {
			if (result) {
				img_req->pending.result = result;
				return;
			}
		} else {
			img_req->pending.num_pending++;
		}
	}
}

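/*
 * Advance the image request state machine.  Return true if @img_req
 * is completed, false if it is still pending.
 */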
static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	int ret;

again:
	switch (img_req->state) {
	case RBD_IMG_START:
		rbd_assert(!*result);

		ret = rbd_img_exclusive_lock(img_req);
		if (ret < 0) {
			*result = ret;
			return true;
		}
		img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
		if (ret > 0)
			goto again;
		return false;
	case RBD_IMG_EXCLUSIVE_LOCK:
		if (*result)
			return true;

		rbd_assert(!need_exclusive_lock(img_req) ||
			   __rbd_is_lock_owner(rbd_dev));

		rbd_img_object_requests(img_req);
		if (!img_req->pending.num_pending) {
			*result = img_req->pending.result;
			img_req->state = RBD_IMG_OBJECT_REQUESTS;
			goto again;
		}
		img_req->state = __RBD_IMG_OBJECT_REQUESTS;
		return false;
	case __RBD_IMG_OBJECT_REQUESTS:
		if (!pending_result_dec(&img_req->pending, result))
			return false;
		/* fall through */
	case RBD_IMG_OBJECT_REQUESTS:
		return true;
	default:
		BUG();
	}
}

/*
 * Return true if @img_req is completed.
 */
static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
				     int *result)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	bool done;

	if (need_exclusive_lock(img_req)) {
		down_read(&rbd_dev->lock_rwsem);
		mutex_lock(&img_req->state_mutex);
		done = rbd_img_advance(img_req, result);
		if (done)
			rbd_lock_del_request(img_req);
		mutex_unlock(&img_req->state_mutex);
		up_read(&rbd_dev->lock_rwsem);
	} else {
		mutex_lock(&img_req->state_mutex);
		done = rbd_img_advance(img_req, result);
		mutex_unlock(&img_req->state_mutex);
	}

	if (done && *result) {
		rbd_assert(*result < 0);
		rbd_warn(rbd_dev, "%s%s result %d",
			 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
			 obj_op_name(img_req->op_type), *result);
	}
	return done;
}

static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
{
again:
	if (!__rbd_img_handle_request(img_req, &result))
		return;

	if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
		struct rbd_obj_request *obj_req = img_req->obj_request;

		rbd_img_request_put(img_req);
		if (__rbd_obj_handle_request(obj_req, &result)) {
			img_req = obj_req->img_request;
			goto again;
		}
	} else {
		struct request *rq = img_req->rq;

		rbd_img_request_put(img_req);
		blk_mq_end_request(rq, errno_to_blk_status(result));
	}
}

static const struct rbd_client_id rbd_empty_cid;

static bool rbd_cid_equal(const struct rbd_client_id *lhs,
			  const struct rbd_client_id *rhs)
{
	return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
}

static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
{
	struct rbd_client_id cid;

	mutex_lock(&rbd_dev->watch_mutex);
	cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
	cid.handle = rbd_dev->watch_cookie;
	mutex_unlock(&rbd_dev->watch_mutex);
	return cid;
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
			      const struct rbd_client_id *cid)
{
	dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
	     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
	     cid->gid, cid->handle);
	rbd_dev->owner_cid = *cid; /* struct */
}

static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
{
	mutex_lock(&rbd_dev->watch_mutex);
	sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
	mutex_unlock(&rbd_dev->watch_mutex);
}

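/*
 * Record that we own the exclusive lock: set the cookie and owner cid,
 * transition to RBD_LOCK_STATE_LOCKED and queue the acquired-lock
 * notification.  lock_rwsem must be held for write.
 */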
static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);

	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
	strcpy(rbd_dev->lock_cookie, cookie);
	rbd_set_owner_cid(rbd_dev, &cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}

/*
 * lock_rwsem must be held for write
 */
static int rbd_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] != '\0');

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
			    RBD_LOCK_TAG, "", 0);
	if (ret)
		return ret;

	__rbd_lock(rbd_dev, cookie);
	return 0;
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] == '\0');

	ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			      RBD_LOCK_NAME, rbd_dev->lock_cookie);
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock header: %d", ret);

	/* treat errors as the image is unlocked */
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	rbd_dev->lock_cookie[0] = '\0';
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
}

static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
				enum rbd_notify_op notify_op,
				struct page ***preply_pages,
				size_t *preply_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
	char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
	int buf_size = sizeof(buf);
	void *p = buf;

	dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);

	/* encode *LockPayload NotifyMessage (op + ClientId) */
	ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
	ceph_encode_32(&p, notify_op);
	ceph_encode_64(&p, cid.gid);
	ceph_encode_64(&p, cid.handle);

	return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, buf, buf_size,
				RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
}

static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
			       enum rbd_notify_op notify_op)
{
	struct page **reply_pages;
	size_t reply_len;

	__rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
}

static void rbd_notify_acquired_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  acquired_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
}

static void rbd_notify_released_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  released_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
}

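/*
 * Ask the current lock owner to release the lock.  Returns the owner's
 * ResponseMessage result (<= 0, -EROFS means it refused), -ETIMEDOUT
 * if no owner responded, or -EIO if duplicate owners were detected.
 */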
static int rbd_request_lock(struct rbd_device *rbd_dev)
{
	struct page **reply_pages;
	size_t reply_len;
	bool lock_owner_responded = false;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
				   &reply_pages, &reply_len);
	if (ret && ret != -ETIMEDOUT) {
		rbd_warn(rbd_dev, "failed to request lock: %d", ret);
		goto out;
	}

	if (reply_len > 0 && reply_len <= PAGE_SIZE) {
		void *p = page_address(reply_pages[0]);
		void *const end = p + reply_len;
		u32 n;

		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
		while (n--) {
			u8 struct_v;
			u32 len;

			ceph_decode_need(&p, end, 8 + 8, e_inval);
			p += 8 + 8; /* skip gid and cookie */

			ceph_decode_32_safe(&p, end, len, e_inval);
			if (!len)
				continue;

			if (lock_owner_responded) {
				rbd_warn(rbd_dev,
					 "duplicate lock owners detected");
				ret = -EIO;
				goto out;
			}

			lock_owner_responded = true;
			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
						  &struct_v, &len);
			if (ret) {
				rbd_warn(rbd_dev,
					 "failed to decode ResponseMessage: %d",
					 ret);
				goto e_inval;
			}

			ret = ceph_decode_32(&p);
		}
	}

	if (!lock_owner_responded) {
		rbd_warn(rbd_dev, "no lock owners detected");
		ret = -ETIMEDOUT;
	}

out:
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

/*
 * Either image request state machine(s) or rbd_add_acquire_lock()
 * (i.e. "rbd map").
 */
static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
{
	struct rbd_img_request *img_req;

	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
	lockdep_assert_held_write(&rbd_dev->lock_rwsem);

	cancel_delayed_work(&rbd_dev->lock_dwork);
	if (!completion_done(&rbd_dev->acquire_wait)) {
		rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
			   list_empty(&rbd_dev->running_list));
		rbd_dev->acquire_err = result;
		complete_all(&rbd_dev->acquire_wait);
		return;
	}

	list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
		mutex_lock(&img_req->state_mutex);
		rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
		rbd_img_schedule(img_req, result);
		mutex_unlock(&img_req->state_mutex);
	}

	list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}

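/*
 * Fetch information about the holder of the exclusive lock.  Return
 * -EBUSY if the lock is held via an external mechanism (foreign tag
 * or cookie) or is a shared lock.
 */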
static int get_lock_owner_info(struct rbd_device *rbd_dev,
			       struct ceph_locker **lockers, u32 *num_lockers)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	u8 lock_type;
	char *lock_tag;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
				 &lock_type, &lock_tag, lockers, num_lockers);
	if (ret)
		return ret;

	if (*num_lockers == 0) {
		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
		goto out;
	}

	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
			 lock_tag);
		ret = -EBUSY;
		goto out;
	}

	if (lock_type == CEPH_CLS_LOCK_SHARED) {
		rbd_warn(rbd_dev, "shared lock type detected");
		ret = -EBUSY;
		goto out;
	}

	if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
		    strlen(RBD_LOCK_COOKIE_PREFIX))) {
		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
			 (*lockers)[0].id.cookie);
		ret = -EBUSY;
		goto out;
	}

out:
	kfree(lock_tag);
	return ret;
}

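/*
 * Return 1 if @locker still has a watch established on the header
 * object (i.e. the lock owner is alive), 0 if it does not, or a
 * negative error code.
 */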
static int find_watcher(struct rbd_device *rbd_dev,
			const struct ceph_locker *locker)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u64 cookie;
	int i;
	int ret;

	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
				      &rbd_dev->header_oloc, &watchers,
				      &num_watchers);
	if (ret)
		return ret;

	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
	for (i = 0; i < num_watchers; i++) {
		if (!memcmp(&watchers[i].addr, &locker->info.addr,
			    sizeof(locker->info.addr)) &&
		    watchers[i].cookie == cookie) {
			struct rbd_client_id cid = {
				.gid = le64_to_cpu(watchers[i].name.num),
				.handle = cookie,
			};

			dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
			     rbd_dev, cid.gid, cid.handle);
			rbd_set_owner_cid(rbd_dev, &cid);
			ret = 1;
			goto out;
		}
	}

	dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
	ret = 0;
out:
	kfree(watchers);
	return ret;
}

/*
 * lock_rwsem must be held for write
 */
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
	struct ceph_client *client = rbd_dev->rbd_client->client;
	struct ceph_locker *lockers;
	u32 num_lockers;
	int ret;

	for (;;) {
		ret = rbd_lock(rbd_dev);
		if (ret != -EBUSY)
			return ret;

		/* determine if the current lock holder is still alive */
		ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
		if (ret)
			return ret;

		if (num_lockers == 0)
			goto again;

		ret = find_watcher(rbd_dev, lockers);
		if (ret)
			goto out; /* request lock or error */

		rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
			 ENTITY_NAME(lockers[0].id.name));

		ret = ceph_monc_blacklist_add(&client->monc,
					      &lockers[0].info.addr);
		if (ret) {
			rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
				 ENTITY_NAME(lockers[0].id.name), ret);
			goto out;
		}

		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
					  lockers[0].id.cookie,
					  &lockers[0].id.name);
		if (ret && ret != -ENOENT)
			goto out;

again:
		ceph_free_lockers(lockers, num_lockers);
	}

out:
	ceph_free_lockers(lockers, num_lockers);
	return ret;
}

static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
{
	int ret;

	if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
		ret = rbd_object_map_open(rbd_dev);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Return:
 *   0 - lock acquired
 *   1 - caller should call rbd_request_lock()
 *  <0 - error
 */
static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
{
	int ret;

	down_read(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (__rbd_is_lock_owner(rbd_dev)) {
		up_read(&rbd_dev->lock_rwsem);
		return 0;
	}

	up_read(&rbd_dev->lock_rwsem);
	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (__rbd_is_lock_owner(rbd_dev)) {
		up_write(&rbd_dev->lock_rwsem);
		return 0;
	}

	ret = rbd_try_lock(rbd_dev);
	if (ret < 0) {
		rbd_warn(rbd_dev, "failed to lock header: %d", ret);
		if (ret == -EBLACKLISTED)
			goto out;

		ret = 1; /* request lock anyway */
	}
	if (ret > 0) {
		up_write(&rbd_dev->lock_rwsem);
		return ret;
	}

	rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
	rbd_assert(list_empty(&rbd_dev->running_list));

	ret = rbd_post_acquire_action(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
		/*
		 * Can't stay in RBD_LOCK_STATE_LOCKED because
		 * rbd_lock_add_request() would let the request through,
		 * assuming that e.g. object map is locked and loaded.
		 */
		rbd_unlock(rbd_dev);
	}

out:
	wake_lock_waiters(rbd_dev, ret);
	up_write(&rbd_dev->lock_rwsem);
	return ret;
}

static void rbd_acquire_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, lock_dwork);
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
	ret = rbd_try_acquire_lock(rbd_dev);
	if (ret <= 0) {
		dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
		return;
	}

	ret = rbd_request_lock(rbd_dev);
	if (ret == -ETIMEDOUT) {
		goto again; /* treat this as a dead client */
	} else if (ret == -EROFS) {
		rbd_warn(rbd_dev, "peer will not release lock");
		down_write(&rbd_dev->lock_rwsem);
		wake_lock_waiters(rbd_dev, ret);
		up_write(&rbd_dev->lock_rwsem);
	} else if (ret < 0) {
		rbd_warn(rbd_dev, "error requesting lock: %d", ret);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
				 RBD_RETRY_DELAY);
	} else {
		/*
		 * lock owner acked, but resend if we don't see them
		 * release the lock
		 */
		dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
		     rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
		    msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
	}
}

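/*
 * Transition to RBD_LOCK_STATE_RELEASING and wait for in-flight IO
 * (the running list) to drain.  Return true if the lock can now be
 * released.  lock_rwsem must be held for write on entry and is held
 * for write on return.
 */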
static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
{
	bool need_wait;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	lockdep_assert_held_write(&rbd_dev->lock_rwsem);

	if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
		return false;

	/*
	 * Ensure that all in-flight IO is flushed.
	 */
	rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
	rbd_assert(!completion_done(&rbd_dev->releasing_wait));
	need_wait = !list_empty(&rbd_dev->running_list);
	downgrade_write(&rbd_dev->lock_rwsem);
	if (need_wait)
		wait_for_completion(&rbd_dev->releasing_wait);
	up_read(&rbd_dev->lock_rwsem);

	down_write(&rbd_dev->lock_rwsem);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
		return false;

	rbd_assert(list_empty(&rbd_dev->running_list));
	return true;
}

static void rbd_pre_release_action(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
		rbd_object_map_close(rbd_dev);
}

static void __rbd_release_lock(struct rbd_device *rbd_dev)
{
	rbd_assert(list_empty(&rbd_dev->running_list));

	rbd_pre_release_action(rbd_dev);
	rbd_unlock(rbd_dev);
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_release_lock(struct rbd_device *rbd_dev)
{
	if (!rbd_quiesce_lock(rbd_dev))
		return;

	__rbd_release_lock(rbd_dev);

	/*
	 * Give others a chance to grab the lock - we would re-acquire
	 * almost immediately if we got new IO while draining the running
	 * list otherwise.  We need to ack our own notifications, so this
	 * lock_dwork will be requeued from rbd_handle_released_lock() by
	 * way of maybe_kick_acquire().
	 */
	cancel_delayed_work(&rbd_dev->lock_dwork);
}

static void rbd_release_lock_work(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  unlock_work);

	down_write(&rbd_dev->lock_rwsem);
	rbd_release_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}

static void maybe_kick_acquire(struct rbd_device *rbd_dev)
{
	bool have_requests;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	if (__rbd_is_lock_owner(rbd_dev))
		return;

	spin_lock(&rbd_dev->lock_lists_lock);
	have_requests = !list_empty(&rbd_dev->acquiring_list);
	spin_unlock(&rbd_dev->lock_lists_lock);
	if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
		dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	}
}

static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			/*
			 * we already know that the remote client is
			 * the owner
			 */
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	maybe_kick_acquire(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
}

static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
			     __func__, rbd_dev, cid.gid, cid.handle,
			     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	maybe_kick_acquire(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
}

/*
 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
 * ResponseMessage is needed.
 */
static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
				   void **p)
{
	struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
	struct rbd_client_id cid = { 0 };
	int result = 1;

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (rbd_cid_equal(&cid, &my_cid))
		return result;

	down_read(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev)) {
		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
		    rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
			goto out_unlock;

		/*
		 * encode ResponseMessage(0) so the peer can detect
		 * a missing owner
		 */
		result = 0;

		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
			if (!rbd_dev->opts->exclusive) {
				dout("%s rbd_dev %p queueing unlock_work\n",
				     __func__, rbd_dev);
				queue_work(rbd_dev->task_wq,
					   &rbd_dev->unlock_work);
			} else {
				/* refuse to release the lock */
				result = -EROFS;
			}
		}
	}

out_unlock:
	up_read(&rbd_dev->lock_rwsem);
	return result;
}

static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
				     u64 notify_id, u64 cookie, s32 *result)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char buf[4 + CEPH_ENCODING_START_BLK_LEN];
	int buf_size = sizeof(buf);
	int ret;

	if (result) {
		void *p = buf;

		/* encode ResponseMessage */
		ceph_start_encoding(&p, 1, 1,
				    buf_size - CEPH_ENCODING_START_BLK_LEN);
		ceph_encode_32(&p, *result);
	} else {
		buf_size = 0;
	}

	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
				   &rbd_dev->header_oloc, notify_id, cookie,
				   buf, buf_size);
	if (ret)
		rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
}

static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
				   u64 cookie)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
}

static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
					  u64 notify_id, u64 cookie, s32 result)
{
	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
}

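/*
 * Watch callback: decode the incoming NotifyMessage and dispatch on
 * the notify op (lock acquired/released/requested or header update).
 */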
static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
			 u64 notifier_id, void *data, size_t data_len)
{
	struct rbd_device *rbd_dev = arg;
	void *p = data;
	void *const end = p + data_len;
	u8 struct_v = 0;
	u32 len;
	u32 notify_op;
	int ret;

	dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
	     __func__, rbd_dev, cookie, notify_id, data_len);
	if (data_len) {
		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
					  &struct_v, &len);
		if (ret) {
			rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
				 ret);
			return;
		}

		notify_op = ceph_decode_32(&p);
	} else {
		/* legacy notification for header updates */
		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
		len = 0;
	}

	dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
	switch (notify_op) {
	case RBD_NOTIFY_OP_ACQUIRED_LOCK:
		rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_RELEASED_LOCK:
		rbd_handle_released_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_REQUEST_LOCK:
		ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
		if (ret <= 0)
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, ret);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	case RBD_NOTIFY_OP_HEADER_UPDATE:
		ret = rbd_dev_refresh(rbd_dev);
		if (ret)
			rbd_warn(rbd_dev, "refresh failed: %d", ret);

		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	default:
		if (rbd_is_lock_owner(rbd_dev))
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, -EOPNOTSUPP);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	}
}

static void __rbd_unregister_watch(struct rbd_device *rbd_dev);

static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
	struct rbd_device *rbd_dev = arg;

	rbd_warn(rbd_dev, "encountered watch error: %d", err);

	down_write(&rbd_dev->lock_rwsem);
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	up_write(&rbd_dev->lock_rwsem);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
		__rbd_unregister_watch(rbd_dev);
		rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;

		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
	}
	mutex_unlock(&rbd_dev->watch_mutex);
}

/*
 * watch_mutex must be locked
 */
static int __rbd_register_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_linger_request *handle;

	rbd_assert(!rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, rbd_watch_cb,
				 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	rbd_dev->watch_handle = handle;
	return 0;
}

/*
 * watch_mutex must be locked
 */
static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	rbd_assert(rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
	if (ret)
		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);

	rbd_dev->watch_handle = NULL;
}

static int rbd_register_watch(struct rbd_device *rbd_dev)
{
	int ret;

	mutex_lock(&rbd_dev->watch_mutex);
	rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
	ret = __rbd_register_watch(rbd_dev);
	if (ret)
		goto out;

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;

out:
	mutex_unlock(&rbd_dev->watch_mutex);
	return ret;
}

static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	cancel_work_sync(&rbd_dev->acquired_lock_work);
	cancel_work_sync(&rbd_dev->released_lock_work);
	cancel_delayed_work_sync(&rbd_dev->lock_dwork);
	cancel_work_sync(&rbd_dev->unlock_work);
}

static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	cancel_tasks_sync(rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
		__rbd_unregister_watch(rbd_dev);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	mutex_unlock(&rbd_dev->watch_mutex);

	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	if (!rbd_quiesce_lock(rbd_dev))
		return;

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, RBD_LOCK_NAME,
				  CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
				  RBD_LOCK_TAG, cookie);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			rbd_warn(rbd_dev, "failed to update lock cookie: %d",
				 ret);

		/*
		 * Lock cookie cannot be updated on older OSDs, so do
		 * a manual release and queue an acquire.
		 */
		__rbd_release_lock(rbd_dev);
		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	} else {
		__rbd_lock(rbd_dev, cookie);
		wake_lock_waiters(rbd_dev, 0);
	}
}

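/*
 * Re-establish the watch after an error, retrying on transient
 * failures.  On success, reacquire the lock if we held it and refresh
 * the header.
 */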
static void rbd_reregister_watch(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, watch_dwork);
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	ret = __rbd_register_watch(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
		if (ret != -EBLACKLISTED && ret != -ENOENT) {
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->watch_dwork,
					   RBD_RETRY_DELAY);
			mutex_unlock(&rbd_dev->watch_mutex);
			return;
		}

		mutex_unlock(&rbd_dev->watch_mutex);
		down_write(&rbd_dev->lock_rwsem);
		wake_lock_waiters(rbd_dev, ret);
		up_write(&rbd_dev->lock_rwsem);
		return;
	}

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
	mutex_unlock(&rbd_dev->watch_mutex);

	down_write(&rbd_dev->lock_rwsem);
	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		rbd_reacquire_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
}

/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			       struct ceph_object_id *oid,
			       struct ceph_object_locator *oloc,
			       const char *method_name,
			       const void *outbound,
			       size_t outbound_size,
			       void *inbound,
			       size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct page *req_page = NULL;
	struct page *reply_page;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	if (outbound) {
		if (outbound_size > PAGE_SIZE)
			return -E2BIG;

		req_page = alloc_page(GFP_KERNEL);
		if (!req_page)
			return -ENOMEM;

		memcpy(page_address(req_page), outbound, outbound_size);
	}

	reply_page = alloc_page(GFP_KERNEL);
	if (!reply_page) {
		if (req_page)
			__free_page(req_page);
		return -ENOMEM;
	}

	ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
			     CEPH_OSD_FLAG_READ, req_page, outbound_size,
			     &reply_page, &inbound_size);
	if (!ret) {
		memcpy(inbound, page_address(reply_page), inbound_size);
		ret = inbound_size;
	}

	if (req_page)
		__free_page(req_page);
	__free_page(reply_page);
	return ret;
}

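/*
 * Service a block layer request: validate it against the mapping,
 * build an image request and kick off its state machine.  Runs from
 * the per-request work item queued by rbd_queue_rq().
 */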
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	int result;

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		op_type = OBJ_OP_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		op_type = OBJ_OP_ZEROOUT;
		break;
	case REQ_OP_WRITE:
		op_type = OBJ_OP_WRITE;
		break;
	case REQ_OP_READ:
		op_type = OBJ_OP_READ;
		break;
	default:
		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
		result = -EIO;
		goto err;
	}

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	if (op_type != OBJ_OP_READ && rbd_dev->spec->snap_id != CEPH_NOSNAP) {
		rbd_warn(rbd_dev, "%s on read-only snapshot",
			 obj_op_name(op_type));
		result = -EIO;
		goto err;
	}

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
	     img_request, obj_op_name(op_type), offset, length);

	if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
		result = rbd_img_fill_nodata(img_request, offset, length);
	else
		result = rbd_img_fill_from_bio(img_request, offset, length,
					       rq->bio);
	if (result)
		goto err_img_request;

	rbd_img_handle_request(img_request, 0);
	return;

err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, errno_to_blk_status(result));
}

static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_STS_OK;
}

static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	blk_cleanup_queue(rbd_dev->disk->queue);
	blk_mq_free_tag_set(&rbd_dev->tag_set);
	put_disk(rbd_dev->disk);
	rbd_dev->disk = NULL;
}

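/*
 * Synchronously read up to @buf_len bytes from the start of the given
 * object into @buf.  Return the number of bytes read, or a negative
 * error code.
 */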
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     void *buf, int buf_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages = calc_pages_for(0, buf_len);
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_req;
	}

	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
					 true);

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0)
		ceph_copy_from_page_vector(pages, buf, 0, ret);

out_req:
	ceph_osdc_put_request(req);
	return ret;
}

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
					&rbd_dev->header_oloc, ondisk, size);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				 size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

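/*
 * Re-read the image header and resize the mapping if the image size
 * has changed.  Called on header update notifications and after watch
 * re-registration.
 */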
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}

static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static const struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.init_request	= rbd_init_request,
};

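/*
 * Set up the gendisk and blk-mq request queue for the mapped image.
 * Queue limits are derived from the object set size and the alloc_size
 * and trim map options.
 */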
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	unsigned int objset_bytes =
	    rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, USHRT_MAX);
	blk_queue_max_segment_size(q, UINT_MAX);
	blk_queue_io_min(q, rbd_dev->opts->alloc_size);
	blk_queue_io_opt(q, rbd_dev->opts->alloc_size);

	if (rbd_dev->opts->trim) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.discard_granularity = rbd_dev->opts->alloc_size;
		blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
		blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
	}

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

	/*
	 * disk_release() expects a queue ref from add_disk() and will
	 * put it.  Hold an extra ref until add_disk() is called.
	 */
	WARN_ON(!blk_get_queue(q));
	disk->queue = q;
	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}

dfc5606d
YS
5196/*
5197 sysfs
5198*/
5199
593a9e7b
AE
5200static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5201{
5202 return container_of(dev, struct rbd_device, dev);
5203}
5204
dfc5606d
YS
5205static ssize_t rbd_size_show(struct device *dev,
5206 struct device_attribute *attr, char *buf)
5207{
593a9e7b 5208 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
a51aa0c0 5209
fc71d833
AE
5210 return sprintf(buf, "%llu\n",
5211 (unsigned long long)rbd_dev->mapping.size);
dfc5606d
YS
5212}
5213
34b13184
AE
5214/*
5215 * Note this shows the features for whatever's mapped, which is not
5216 * necessarily the base image.
5217 */
5218static ssize_t rbd_features_show(struct device *dev,
5219 struct device_attribute *attr, char *buf)
5220{
5221 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5222
5223 return sprintf(buf, "0x%016llx\n",
fc71d833 5224 (unsigned long long)rbd_dev->mapping.features);
34b13184
AE
5225}
5226
dfc5606d
YS
5227static ssize_t rbd_major_show(struct device *dev,
5228 struct device_attribute *attr, char *buf)
5229{
593a9e7b 5230 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf40 5231
fc71d833
AE
5232 if (rbd_dev->major)
5233 return sprintf(buf, "%d\n", rbd_dev->major);
5234
5235 return sprintf(buf, "(none)\n");
dd82fff1
ID
5236}
5237
5238static ssize_t rbd_minor_show(struct device *dev,
5239 struct device_attribute *attr, char *buf)
5240{
5241 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
fc71d833 5242
dd82fff1 5243 return sprintf(buf, "%d\n", rbd_dev->minor);
5244}
5245
5246static ssize_t rbd_client_addr_show(struct device *dev,
5247 struct device_attribute *attr, char *buf)
5248{
5249 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5250 struct ceph_entity_addr *client_addr =
5251 ceph_client_addr(rbd_dev->rbd_client->client);
5252
5253 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5254 le32_to_cpu(client_addr->nonce));
5255}
5256
5257static ssize_t rbd_client_id_show(struct device *dev,
5258 struct device_attribute *attr, char *buf)
602adf40 5259{
593a9e7b 5260 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 5261
1dbb4399 5262 return sprintf(buf, "client%lld\n",
033268a5 5263 ceph_client_gid(rbd_dev->rbd_client->client));
5264}
5265
5266static ssize_t rbd_cluster_fsid_show(struct device *dev,
5267 struct device_attribute *attr, char *buf)
5268{
5269 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5270
5271 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5272}
5273
5274static ssize_t rbd_config_info_show(struct device *dev,
5275 struct device_attribute *attr, char *buf)
5276{
5277 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5278
5279 return sprintf(buf, "%s\n", rbd_dev->config_info);
5280}
5281
5282static ssize_t rbd_pool_show(struct device *dev,
5283 struct device_attribute *attr, char *buf)
602adf40 5284{
593a9e7b 5285 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 5286
0d7dbfce 5287 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5288}
5289
5290static ssize_t rbd_pool_id_show(struct device *dev,
5291 struct device_attribute *attr, char *buf)
5292{
5293 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5294
0d7dbfce 5295 return sprintf(buf, "%llu\n",
fc71d833 5296 (unsigned long long) rbd_dev->spec->pool_id);
5297}
5298
5299static ssize_t rbd_pool_ns_show(struct device *dev,
5300 struct device_attribute *attr, char *buf)
5301{
5302 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5303
5304 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5305}
5306
5307static ssize_t rbd_name_show(struct device *dev,
5308 struct device_attribute *attr, char *buf)
5309{
593a9e7b 5310 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 5311
5312 if (rbd_dev->spec->image_name)
5313 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5314
5315 return sprintf(buf, "(unknown)\n");
5316}
5317
5318static ssize_t rbd_image_id_show(struct device *dev,
5319 struct device_attribute *attr, char *buf)
5320{
5321 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5322
0d7dbfce 5323 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5324}
5325
5326/*
5327 * Shows the name of the currently-mapped snapshot (or
5328 * RBD_SNAP_HEAD_NAME for the base image).
5329 */
5330static ssize_t rbd_snap_show(struct device *dev,
5331 struct device_attribute *attr,
5332 char *buf)
5333{
593a9e7b 5334 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 5335
0d7dbfce 5336 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5337}
5338
5339static ssize_t rbd_snap_id_show(struct device *dev,
5340 struct device_attribute *attr, char *buf)
5341{
5342 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5343
5344 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5345}
5346
86b00e0d 5347/*
5348 * For a v2 image, shows the chain of parent images, separated by empty
5349 * lines. For v1 images or if there is no parent, shows "(no parent
5350 * image)".
5351 */
5352static ssize_t rbd_parent_show(struct device *dev,
5353 struct device_attribute *attr,
5354 char *buf)
5355{
5356 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
ff96128f 5357 ssize_t count = 0;
86b00e0d 5358
ff96128f 5359 if (!rbd_dev->parent)
5360 return sprintf(buf, "(no parent image)\n");
5361
5362 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5363 struct rbd_spec *spec = rbd_dev->parent_spec;
5364
5365 count += sprintf(&buf[count], "%s"
5366 "pool_id %llu\npool_name %s\n"
e92c0eaf 5367 "pool_ns %s\n"
5368 "image_id %s\nimage_name %s\n"
5369 "snap_id %llu\nsnap_name %s\n"
5370 "overlap %llu\n",
5371 !count ? "" : "\n", /* first? */
5372 spec->pool_id, spec->pool_name,
e92c0eaf 5373 spec->pool_ns ?: "",
5374 spec->image_id, spec->image_name ?: "(unknown)",
5375 spec->snap_id, spec->snap_name,
5376 rbd_dev->parent_overlap);
5377 }
5378
5379 return count;
5380}
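/*
 * Illustrative (made-up) output of the parent attribute for a
 * one-level chain, following the sprintf() format above:
 *
 *	pool_id 2
 *	pool_name rbd
 *	pool_ns
 *	image_id 1038e4b5d2c6
 *	image_name parent-img
 *	snap_id 4
 *	snap_name base
 *	overlap 10737418240
 */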
5381
5382static ssize_t rbd_image_refresh(struct device *dev,
5383 struct device_attribute *attr,
5384 const char *buf,
5385 size_t size)
5386{
593a9e7b 5387 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
b813623a 5388 int ret;
602adf40 5389
cc4a38bd 5390 ret = rbd_dev_refresh(rbd_dev);
e627db08 5391 if (ret)
52bb1f9b 5392 return ret;
b813623a 5393
52bb1f9b 5394 return size;
dfc5606d 5395}
602adf40 5396
5397static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5398static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5399static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5400static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5401static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5402static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5403static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5404static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5405static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5406static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
b26c047b 5407static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5408static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5409static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5410static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5411static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5412static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5413static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5414
5415static struct attribute *rbd_attrs[] = {
5416 &dev_attr_size.attr,
34b13184 5417 &dev_attr_features.attr,
dfc5606d 5418 &dev_attr_major.attr,
dd82fff1 5419 &dev_attr_minor.attr,
005a07bf 5420 &dev_attr_client_addr.attr,
dfc5606d 5421 &dev_attr_client_id.attr,
267fb90b 5422 &dev_attr_cluster_fsid.attr,
0d6d1e9c 5423 &dev_attr_config_info.attr,
dfc5606d 5424 &dev_attr_pool.attr,
9bb2f334 5425 &dev_attr_pool_id.attr,
b26c047b 5426 &dev_attr_pool_ns.attr,
dfc5606d 5427 &dev_attr_name.attr,
589d30e0 5428 &dev_attr_image_id.attr,
dfc5606d 5429 &dev_attr_current_snap.attr,
92a58671 5430 &dev_attr_snap_id.attr,
86b00e0d 5431 &dev_attr_parent.attr,
dfc5606d 5432 &dev_attr_refresh.attr,
5433 NULL
5434};
5435
5436static struct attribute_group rbd_attr_group = {
5437 .attrs = rbd_attrs,
5438};
5439
5440static const struct attribute_group *rbd_attr_groups[] = {
5441 &rbd_attr_group,
5442 NULL
5443};
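/*
 * These attributes show up under /sys/bus/rbd/devices/<dev-id>/;
 * e.g. reading .../0/size gives the mapped size in bytes.  Note the
 * modes above: refresh (0200) is write-only and config_info (0400)
 * is readable by root only.
 */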
5444
6cac4695 5445static void rbd_dev_release(struct device *dev);
dfc5606d 5446
b9942bc9 5447static const struct device_type rbd_device_type = {
5448 .name = "rbd",
5449 .groups = rbd_attr_groups,
6cac4695 5450 .release = rbd_dev_release,
5451};
5452
5453static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5454{
5455 kref_get(&spec->kref);
5456
5457 return spec;
5458}
5459
5460static void rbd_spec_free(struct kref *kref);
5461static void rbd_spec_put(struct rbd_spec *spec)
5462{
5463 if (spec)
5464 kref_put(&spec->kref, rbd_spec_free);
5465}
5466
5467static struct rbd_spec *rbd_spec_alloc(void)
5468{
5469 struct rbd_spec *spec;
5470
5471 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5472 if (!spec)
5473 return NULL;
5474
5475 spec->pool_id = CEPH_NOPOOL;
5476 spec->snap_id = CEPH_NOSNAP;
5477 kref_init(&spec->kref);
5478
5479 return spec;
5480}
5481
5482static void rbd_spec_free(struct kref *kref)
5483{
5484 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5485
5486 kfree(spec->pool_name);
b26c047b 5487 kfree(spec->pool_ns);
5488 kfree(spec->image_id);
5489 kfree(spec->image_name);
5490 kfree(spec->snap_name);
5491 kfree(spec);
5492}
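/*
 * rbd_spec is reference counted so that parent and child images can
 * share one spec.  A minimal ownership sketch:
 *
 *	struct rbd_spec *spec = rbd_spec_alloc();	refcount is 1
 *	rbd_spec_get(spec);				second owner
 *	rbd_spec_put(spec);				each owner puts once
 *	rbd_spec_put(spec);		final put frees via rbd_spec_free()
 */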
5493
1643dfa4 5494static void rbd_dev_free(struct rbd_device *rbd_dev)
dd5ac32d 5495{
99d16943 5496 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
ed95b21a 5497 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
dd5ac32d 5498
c41d13a3 5499 ceph_oid_destroy(&rbd_dev->header_oid);
6b6dddbe 5500 ceph_oloc_destroy(&rbd_dev->header_oloc);
0d6d1e9c 5501 kfree(rbd_dev->config_info);
c41d13a3 5502
5503 rbd_put_client(rbd_dev->rbd_client);
5504 rbd_spec_put(rbd_dev->spec);
5505 kfree(rbd_dev->opts);
5506 kfree(rbd_dev);
5507}
5508
5509static void rbd_dev_release(struct device *dev)
5510{
5511 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5512 bool need_put = !!rbd_dev->opts;
5513
5514 if (need_put) {
5515 destroy_workqueue(rbd_dev->task_wq);
5516 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5517 }
5518
5519 rbd_dev_free(rbd_dev);
5520
5521 /*
5522	 * This is racy, but way better than dropping the module reference
5523	 * outside of the release callback. The race window is pretty small, so
5524 * doing something similar to dm (dm-builtin.c) is overkill.
5525 */
5526 if (need_put)
5527 module_put(THIS_MODULE);
5528}
5529
5530static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
5531 struct rbd_spec *spec)
5532{
5533 struct rbd_device *rbd_dev;
5534
1643dfa4 5535 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5536 if (!rbd_dev)
5537 return NULL;
5538
5539 spin_lock_init(&rbd_dev->lock);
5540 INIT_LIST_HEAD(&rbd_dev->node);
5541 init_rwsem(&rbd_dev->header_rwsem);
5542
7e97332e 5543 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
c41d13a3 5544 ceph_oid_init(&rbd_dev->header_oid);
431a02cd 5545 rbd_dev->header_oloc.pool = spec->pool_id;
5546 if (spec->pool_ns) {
5547 WARN_ON(!*spec->pool_ns);
5548 rbd_dev->header_oloc.pool_ns =
5549 ceph_find_or_create_string(spec->pool_ns,
5550 strlen(spec->pool_ns));
5551 }
c41d13a3 5552
5553 mutex_init(&rbd_dev->watch_mutex);
5554 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5555 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5556
5557 init_rwsem(&rbd_dev->lock_rwsem);
5558 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5559 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5560 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5561 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5562 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
e1fddc8f 5563 spin_lock_init(&rbd_dev->lock_lists_lock);
637cd060 5564 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
e1fddc8f 5565 INIT_LIST_HEAD(&rbd_dev->running_list);
637cd060 5566 init_completion(&rbd_dev->acquire_wait);
e1fddc8f 5567 init_completion(&rbd_dev->releasing_wait);
ed95b21a 5568
22e8bd51 5569 spin_lock_init(&rbd_dev->object_map_lock);
ed95b21a 5570
5571 rbd_dev->dev.bus = &rbd_bus_type;
5572 rbd_dev->dev.type = &rbd_device_type;
5573 rbd_dev->dev.parent = &rbd_root_dev;
5574 device_initialize(&rbd_dev->dev);
5575
c53d5893 5576 rbd_dev->rbd_client = rbdc;
d147543d 5577 rbd_dev->spec = spec;
0903e875 5578
5579 return rbd_dev;
5580}
5581
5582/*
5583 * Create a mapping rbd_dev.
5584 */
5585static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5586 struct rbd_spec *spec,
5587 struct rbd_options *opts)
5588{
5589 struct rbd_device *rbd_dev;
5590
5591 rbd_dev = __rbd_dev_create(rbdc, spec);
5592 if (!rbd_dev)
5593 return NULL;
5594
5595 rbd_dev->opts = opts;
5596
5597 /* get an id and fill in device name */
5598 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5599 minor_to_rbd_dev_id(1 << MINORBITS),
5600 GFP_KERNEL);
5601 if (rbd_dev->dev_id < 0)
5602 goto fail_rbd_dev;
5603
5604 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5605 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5606 rbd_dev->name);
5607 if (!rbd_dev->task_wq)
5608 goto fail_dev_id;
dd5ac32d 5609
5610 /* we have a ref from do_rbd_add() */
5611 __module_get(THIS_MODULE);
dd5ac32d 5612
1643dfa4 5613 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
c53d5893 5614 return rbd_dev;
5615
5616fail_dev_id:
5617 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5618fail_rbd_dev:
5619 rbd_dev_free(rbd_dev);
5620 return NULL;
5621}
5622
5623static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5624{
5625 if (rbd_dev)
5626 put_device(&rbd_dev->dev);
5627}
5628
5629/*
5630 * Get the size and object order for an image snapshot, or if
5631 * snap_id is CEPH_NOSNAP, gets this information for the base
5632 * image.
5633 */
5634static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5635 u8 *order, u64 *snap_size)
5636{
5637 __le64 snapid = cpu_to_le64(snap_id);
5638 int ret;
5639 struct {
5640 u8 order;
5641 __le64 size;
5642 } __attribute__ ((packed)) size_buf = { 0 };
5643
5644 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5645 &rbd_dev->header_oloc, "get_size",
5646 &snapid, sizeof(snapid),
5647 &size_buf, sizeof(size_buf));
36be9a76 5648 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5649 if (ret < 0)
5650 return ret;
5651 if (ret < sizeof (size_buf))
5652 return -ERANGE;
9d475de5 5653
c3545579 5654 if (order) {
c86f86e9 5655 *order = size_buf.order;
5656 dout(" order %u", (unsigned int)*order);
5657 }
5658 *snap_size = le64_to_cpu(size_buf.size);
5659
5660 dout(" snap_id 0x%016llx snap_size = %llu\n",
5661 (unsigned long long)snap_id,
57385b51 5662 (unsigned long long)*snap_size);
5663
5664 return 0;
5665}
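/*
 * The "get_size" reply is the packed (u8 order, le64 size) pair
 * decoded into size_buf above.  order is the log2 of the object
 * size, so e.g. order 22 means 1 << 22 = 4 MiB objects.
 */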
5666
5667static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5668{
5669 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5670 &rbd_dev->header.obj_order,
5671 &rbd_dev->header.image_size);
5672}
5673
5674static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5675{
5435d206 5676 size_t size;
5677 void *reply_buf;
5678 int ret;
5679 void *p;
5680
5681 /* Response will be an encoded string, which includes a length */
5682 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5683 reply_buf = kzalloc(size, GFP_KERNEL);
5684 if (!reply_buf)
5685 return -ENOMEM;
5686
5687 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5688 &rbd_dev->header_oloc, "get_object_prefix",
5435d206 5689 NULL, 0, reply_buf, size);
36be9a76 5690 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5691 if (ret < 0)
5692 goto out;
5693
5694 p = reply_buf;
5695 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
57385b51
AE
5696 p + ret, NULL, GFP_NOIO);
5697 ret = 0;
5698
5699 if (IS_ERR(rbd_dev->header.object_prefix)) {
5700 ret = PTR_ERR(rbd_dev->header.object_prefix);
5701 rbd_dev->header.object_prefix = NULL;
5702 } else {
5703 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5704 }
5705out:
5706 kfree(reply_buf);
5707
5708 return ret;
5709}
5710
5711static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5712 u64 *snap_features)
5713{
5714 __le64 snapid = cpu_to_le64(snap_id);
5715 struct {
5716 __le64 features;
5717 __le64 incompat;
4157976b 5718 } __attribute__ ((packed)) features_buf = { 0 };
d3767f0f 5719 u64 unsup;
5720 int ret;
5721
5722 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5723 &rbd_dev->header_oloc, "get_features",
5724 &snapid, sizeof(snapid),
5725 &features_buf, sizeof(features_buf));
36be9a76 5726 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5727 if (ret < 0)
5728 return ret;
5729 if (ret < sizeof (features_buf))
5730 return -ERANGE;
d889140c 5731
5732 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5733 if (unsup) {
5734 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5735 unsup);
b8f5c6ed 5736 return -ENXIO;
d3767f0f 5737 }
d889140c 5738
5739 *snap_features = le64_to_cpu(features_buf.features);
5740
5741 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5742 (unsigned long long)snap_id,
5743 (unsigned long long)*snap_features,
5744 (unsigned long long)le64_to_cpu(features_buf.incompat));
5745
5746 return 0;
5747}
5748
5749static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5750{
5751 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5752 &rbd_dev->header.features);
5753}
5754
5755/*
5756	 * These are generic image flags, but since they are used only for
5757	 * the object map, store them in rbd_dev->object_map_flags.
5758 *
5759 * For the same reason, this function is called only on object map
5760 * (re)load and not on header refresh.
5761 */
5762static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5763{
5764 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5765 __le64 flags;
5766 int ret;
5767
5768 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5769 &rbd_dev->header_oloc, "get_flags",
5770 &snapid, sizeof(snapid),
5771 &flags, sizeof(flags));
5772 if (ret < 0)
5773 return ret;
5774 if (ret < sizeof(flags))
5775 return -EBADMSG;
5776
5777 rbd_dev->object_map_flags = le64_to_cpu(flags);
5778 return 0;
5779}
5780
5781struct parent_image_info {
5782 u64 pool_id;
e92c0eaf 5783 const char *pool_ns;
5784 const char *image_id;
5785 u64 snap_id;
5786
e92c0eaf 5787 bool has_overlap;
5788 u64 overlap;
5789};
5790
5791/*
5792 * The caller is responsible for @pii.
5793 */
5794static int decode_parent_image_spec(void **p, void *end,
5795 struct parent_image_info *pii)
5796{
5797 u8 struct_v;
5798 u32 struct_len;
5799 int ret;
5800
5801 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5802 &struct_v, &struct_len);
5803 if (ret)
5804 return ret;
5805
5806 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5807 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5808 if (IS_ERR(pii->pool_ns)) {
5809 ret = PTR_ERR(pii->pool_ns);
5810 pii->pool_ns = NULL;
5811 return ret;
5812 }
5813 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5814 if (IS_ERR(pii->image_id)) {
5815 ret = PTR_ERR(pii->image_id);
5816 pii->image_id = NULL;
5817 return ret;
5818 }
5819 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5820 return 0;
5821
5822e_inval:
5823 return -EINVAL;
5824}
5825
5826static int __get_parent_info(struct rbd_device *rbd_dev,
5827 struct page *req_page,
5828 struct page *reply_page,
5829 struct parent_image_info *pii)
5830{
5831 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5832 size_t reply_len = PAGE_SIZE;
5833 void *p, *end;
5834 int ret;
5835
5836 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5837 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
68ada915 5838 req_page, sizeof(u64), &reply_page, &reply_len);
5839 if (ret)
5840 return ret == -EOPNOTSUPP ? 1 : ret;
5841
5842 p = page_address(reply_page);
5843 end = p + reply_len;
5844 ret = decode_parent_image_spec(&p, end, pii);
5845 if (ret)
5846 return ret;
5847
5848 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5849 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
68ada915 5850 req_page, sizeof(u64), &reply_page, &reply_len);
5851 if (ret)
5852 return ret;
5853
5854 p = page_address(reply_page);
5855 end = p + reply_len;
5856 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5857 if (pii->has_overlap)
5858 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5859
5860 return 0;
5861
5862e_inval:
5863 return -EINVAL;
5864}
5865
5866/*
5867 * The caller is responsible for @pii.
5868 */
5869static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5870 struct page *req_page,
5871 struct page *reply_page,
5872 struct parent_image_info *pii)
5873{
5874 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5875 size_t reply_len = PAGE_SIZE;
5876 void *p, *end;
5877 int ret;
5878
5879 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5880 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
68ada915 5881 req_page, sizeof(u64), &reply_page, &reply_len);
5882 if (ret)
5883 return ret;
5884
5885 p = page_address(reply_page);
5886 end = p + reply_len;
5887 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5888 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5889 if (IS_ERR(pii->image_id)) {
5890 ret = PTR_ERR(pii->image_id);
5891 pii->image_id = NULL;
5892 return ret;
5893 }
5894 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
e92c0eaf 5895 pii->has_overlap = true;
5896 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5897
5898 return 0;
5899
5900e_inval:
5901 return -EINVAL;
5902}
5903
5904static int get_parent_info(struct rbd_device *rbd_dev,
5905 struct parent_image_info *pii)
5906{
5907 struct page *req_page, *reply_page;
5908 void *p;
5909 int ret;
5910
5911 req_page = alloc_page(GFP_KERNEL);
5912 if (!req_page)
5913 return -ENOMEM;
5914
5915 reply_page = alloc_page(GFP_KERNEL);
5916 if (!reply_page) {
5917 __free_page(req_page);
5918 return -ENOMEM;
5919 }
5920
5921 p = page_address(req_page);
5922 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5923 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5924 if (ret > 0)
5925 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5926 pii);
5927
5928 __free_page(req_page);
5929 __free_page(reply_page);
5930 return ret;
5931}
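/*
 * Flow sketch: __get_parent_info() issues the "parent_get" and
 * "parent_overlap_get" class methods and returns 1 if the OSD
 * answers -EOPNOTSUPP, in which case get_parent_info() falls back
 * to the legacy "get_parent" method above.
 */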
5932
5933static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5934{
5935 struct rbd_spec *parent_spec;
eb3b2d6b 5936 struct parent_image_info pii = { 0 };
5937 int ret;
5938
5939 parent_spec = rbd_spec_alloc();
5940 if (!parent_spec)
5941 return -ENOMEM;
5942
5943 ret = get_parent_info(rbd_dev, &pii);
5944 if (ret)
86b00e0d 5945 goto out_err;
86b00e0d 5946
5947 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5948 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5949 pii.has_overlap, pii.overlap);
86b00e0d 5950
e92c0eaf 5951 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
5952 /*
5953	 * Either the parent never existed, or we have a
5954	 * record of it but the image got flattened so it no
5955 * longer has a parent. When the parent of a
5956 * layered image disappears we immediately set the
5957 * overlap to 0. The effect of this is that all new
5958 * requests will be treated as if the image had no
5959 * parent.
5960 *
5961 * If !pii.has_overlap, the parent image spec is not
5962 * applicable. It's there to avoid duplication in each
5963 * snapshot record.
5964 */
5965 if (rbd_dev->parent_overlap) {
5966 rbd_dev->parent_overlap = 0;
5967 rbd_dev_parent_put(rbd_dev);
5968 pr_info("%s: clone image has been flattened\n",
5969 rbd_dev->disk->disk_name);
5970 }
5971
86b00e0d 5972 goto out; /* No parent? No problem. */
392a9dad 5973 }
86b00e0d 5974
5975 /* The ceph file layout needs to fit pool id in 32 bits */
5976
5977 ret = -EIO;
eb3b2d6b 5978 if (pii.pool_id > (u64)U32_MAX) {
9584d508 5979 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
eb3b2d6b 5980 (unsigned long long)pii.pool_id, U32_MAX);
5981 goto out_err;
5982 }
86b00e0d 5983
5984 /*
5985	 * The parent won't change (except when the clone is
5986	 * flattened, which is handled above). So we only need to
5987	 * record the parent spec if we have not already done so.
5988 */
5989 if (!rbd_dev->parent_spec) {
eb3b2d6b 5990 parent_spec->pool_id = pii.pool_id;
5991 if (pii.pool_ns && *pii.pool_ns) {
5992 parent_spec->pool_ns = pii.pool_ns;
5993 pii.pool_ns = NULL;
5994 }
5995 parent_spec->image_id = pii.image_id;
5996 pii.image_id = NULL;
5997 parent_spec->snap_id = pii.snap_id;
b26c047b 5998
5999 rbd_dev->parent_spec = parent_spec;
6000 parent_spec = NULL; /* rbd_dev now owns this */
6001 }
6002
6003 /*
6004 * We always update the parent overlap. If it's zero we issue
6005 * a warning, as we will proceed as if there was no parent.
3b5cf2a2 6006 */
eb3b2d6b 6007 if (!pii.overlap) {
3b5cf2a2 6008 if (parent_spec) {
6009 /* refresh, careful to warn just once */
6010 if (rbd_dev->parent_overlap)
6011 rbd_warn(rbd_dev,
6012 "clone now standalone (overlap became 0)");
3b5cf2a2 6013 } else {
6014 /* initial probe */
6015 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
3b5cf2a2 6016 }
70cf49cf 6017 }
eb3b2d6b 6018 rbd_dev->parent_overlap = pii.overlap;
cf32bd9c 6019
6020out:
6021 ret = 0;
6022out_err:
e92c0eaf 6023 kfree(pii.pool_ns);
eb3b2d6b 6024 kfree(pii.image_id);
86b00e0d 6025 rbd_spec_put(parent_spec);
6026 return ret;
6027}
6028
6029static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
6030{
6031 struct {
6032 __le64 stripe_unit;
6033 __le64 stripe_count;
6034 } __attribute__ ((packed)) striping_info_buf = { 0 };
6035 size_t size = sizeof (striping_info_buf);
6036 void *p;
6037 int ret;
6038
6039 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6040 &rbd_dev->header_oloc, "get_stripe_unit_count",
6041 NULL, 0, &striping_info_buf, size);
6042 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6043 if (ret < 0)
6044 return ret;
6045 if (ret < size)
6046 return -ERANGE;
6047
cc070d59 6048 p = &striping_info_buf;
6049 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
6050 rbd_dev->header.stripe_count = ceph_decode_64(&p);
6051 return 0;
6052}
6053
6054static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
6055{
6056 __le64 data_pool_id;
6057 int ret;
6058
6059 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6060 &rbd_dev->header_oloc, "get_data_pool",
6061 NULL, 0, &data_pool_id, sizeof(data_pool_id));
6062 if (ret < 0)
6063 return ret;
6064 if (ret < sizeof(data_pool_id))
6065 return -EBADMSG;
6066
6067 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
6068 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
6069 return 0;
6070}
6071
6072static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
6073{
ecd4a68a 6074 CEPH_DEFINE_OID_ONSTACK(oid);
6075 size_t image_id_size;
6076 char *image_id;
6077 void *p;
6078 void *end;
6079 size_t size;
6080 void *reply_buf = NULL;
6081 size_t len = 0;
6082 char *image_name = NULL;
6083 int ret;
6084
6085 rbd_assert(!rbd_dev->spec->image_name);
6086
6087 len = strlen(rbd_dev->spec->image_id);
6088 image_id_size = sizeof (__le32) + len;
6089 image_id = kmalloc(image_id_size, GFP_KERNEL);
6090 if (!image_id)
6091 return NULL;
6092
6093 p = image_id;
4157976b 6094 end = image_id + image_id_size;
57385b51 6095 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
6096
6097 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
6098 reply_buf = kmalloc(size, GFP_KERNEL);
6099 if (!reply_buf)
6100 goto out;
6101
6102 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
6103 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6104 "dir_get_name", image_id, image_id_size,
6105 reply_buf, size);
6106 if (ret < 0)
6107 goto out;
6108 p = reply_buf;
6109 end = reply_buf + ret;
6110
6111 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
6112 if (IS_ERR(image_name))
6113 image_name = NULL;
6114 else
6115 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
6116out:
6117 kfree(reply_buf);
6118 kfree(image_id);
6119
6120 return image_name;
6121}
6122
6123static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6124{
6125 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6126 const char *snap_name;
6127 u32 which = 0;
6128
6129 /* Skip over names until we find the one we are looking for */
6130
6131 snap_name = rbd_dev->header.snap_names;
6132 while (which < snapc->num_snaps) {
6133 if (!strcmp(name, snap_name))
6134 return snapc->snaps[which];
6135 snap_name += strlen(snap_name) + 1;
6136 which++;
6137 }
6138 return CEPH_NOSNAP;
6139}
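/*
 * Format 1 keeps all snapshot names in one buffer of consecutive
 * NUL-terminated strings, parallel to snapc->snaps[], e.g.
 *
 *	"snap1\0snap2\0snap3\0"
 *
 * which is why the loop above advances by strlen(snap_name) + 1.
 */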
6140
6141static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6142{
6143 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6144 u32 which;
6145 bool found = false;
6146 u64 snap_id;
6147
6148 for (which = 0; !found && which < snapc->num_snaps; which++) {
6149 const char *snap_name;
6150
6151 snap_id = snapc->snaps[which];
6152 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
6153 if (IS_ERR(snap_name)) {
6154 /* ignore no-longer existing snapshots */
6155 if (PTR_ERR(snap_name) == -ENOENT)
6156 continue;
6157 else
6158 break;
6159 }
6160 found = !strcmp(name, snap_name);
6161 kfree(snap_name);
6162 }
6163 return found ? snap_id : CEPH_NOSNAP;
6164}
6165
6166/*
6167 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6168 * no snapshot by that name is found, or if an error occurs.
6169 */
6170static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6171{
6172 if (rbd_dev->image_format == 1)
6173 return rbd_v1_snap_id_by_name(rbd_dev, name);
6174
6175 return rbd_v2_snap_id_by_name(rbd_dev, name);
6176}
6177
9e15b77d 6178/*
6179 * An image being mapped will have everything but the snap id.
6180 */
6181static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6182{
6183 struct rbd_spec *spec = rbd_dev->spec;
6184
6185 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6186 rbd_assert(spec->image_id && spec->image_name);
6187 rbd_assert(spec->snap_name);
6188
6189 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6190 u64 snap_id;
6191
6192 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6193 if (snap_id == CEPH_NOSNAP)
6194 return -ENOENT;
6195
6196 spec->snap_id = snap_id;
6197 } else {
6198 spec->snap_id = CEPH_NOSNAP;
6199 }
6200
6201 return 0;
6202}
6203
6204/*
6205 * A parent image will have all ids but none of the names.
e1d4213f 6206 *
6207 * All names in an rbd spec are dynamically allocated. It's OK if we
6208 * can't figure out the name for an image id.
9e15b77d 6209 */
04077599 6210static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
9e15b77d 6211{
6212 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6213 struct rbd_spec *spec = rbd_dev->spec;
6214 const char *pool_name;
6215 const char *image_name;
6216 const char *snap_name;
6217 int ret;
6218
6219 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6220 rbd_assert(spec->image_id);
6221 rbd_assert(spec->snap_id != CEPH_NOSNAP);
9e15b77d 6222
2e9f7f1c 6223 /* Get the pool name; we have to make our own copy of this */
9e15b77d 6224
6225 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6226 if (!pool_name) {
6227 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6228 return -EIO;
6229 }
6230 pool_name = kstrdup(pool_name, GFP_KERNEL);
6231 if (!pool_name)
6232 return -ENOMEM;
6233
6234 /* Fetch the image name; tolerate failure here */
6235
6236 image_name = rbd_dev_image_name(rbd_dev);
6237 if (!image_name)
06ecc6cb 6238 rbd_warn(rbd_dev, "unable to get image name");
9e15b77d 6239
04077599 6240 /* Fetch the snapshot name */
9e15b77d 6241
2e9f7f1c 6242 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6243 if (IS_ERR(snap_name)) {
6244 ret = PTR_ERR(snap_name);
9e15b77d 6245 goto out_err;
6246 }
6247
6248 spec->pool_name = pool_name;
6249 spec->image_name = image_name;
6250 spec->snap_name = snap_name;
6251
6252 return 0;
04077599 6253
9e15b77d 6254out_err:
6255 kfree(image_name);
6256 kfree(pool_name);
6257 return ret;
6258}
6259
cc4a38bd 6260static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
6261{
6262 size_t size;
6263 int ret;
6264 void *reply_buf;
6265 void *p;
6266 void *end;
6267 u64 seq;
6268 u32 snap_count;
6269 struct ceph_snap_context *snapc;
6270 u32 i;
6271
6272 /*
6273 * We'll need room for the seq value (maximum snapshot id),
6274 * snapshot count, and array of that many snapshot ids.
6275 * For now we have a fixed upper limit on the number we're
6276 * prepared to receive.
6277 */
6278 size = sizeof (__le64) + sizeof (__le32) +
6279 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6280 reply_buf = kzalloc(size, GFP_KERNEL);
6281 if (!reply_buf)
6282 return -ENOMEM;
6283
6284 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6285 &rbd_dev->header_oloc, "get_snapcontext",
6286 NULL, 0, reply_buf, size);
36be9a76 6287 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6288 if (ret < 0)
6289 goto out;
6290
35d489f9 6291 p = reply_buf;
6292 end = reply_buf + ret;
6293 ret = -ERANGE;
6294 ceph_decode_64_safe(&p, end, seq, out);
6295 ceph_decode_32_safe(&p, end, snap_count, out);
6296
6297 /*
6298 * Make sure the reported number of snapshot ids wouldn't go
6299 * beyond the end of our buffer. But before checking that,
6300 * make sure the computed size of the snapshot context we
6301 * allocate is representable in a size_t.
6302 */
6303 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6304 / sizeof (u64)) {
6305 ret = -EINVAL;
6306 goto out;
6307 }
6308 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6309 goto out;
468521c1 6310 ret = 0;
35d489f9 6311
812164f8 6312 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6313 if (!snapc) {
6314 ret = -ENOMEM;
6315 goto out;
6316 }
35d489f9 6317 snapc->seq = seq;
6318 for (i = 0; i < snap_count; i++)
6319 snapc->snaps[i] = ceph_decode_64(&p);
6320
49ece554 6321 ceph_put_snap_context(rbd_dev->header.snapc);
6322 rbd_dev->header.snapc = snapc;
6323
6324 dout(" snap context seq = %llu, snap_count = %u\n",
57385b51 6325 (unsigned long long)seq, (unsigned int)snap_count);
6326out:
6327 kfree(reply_buf);
6328
57385b51 6329 return ret;
6330}
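/*
 * The "get_snapcontext" reply decoded above is laid out as
 *
 *	le64 seq			maximum snapshot id
 *	le32 snap_count
 *	le64 snaps[snap_count]		snapshot ids
 *
 * hence the buffer sized for a seq, a count and at most
 * RBD_MAX_SNAP_COUNT ids.
 */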
6331
6332static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6333 u64 snap_id)
6334{
6335 size_t size;
6336 void *reply_buf;
54cac61f 6337 __le64 snapid;
6338 int ret;
6339 void *p;
6340 void *end;
6341 char *snap_name;
6342
6343 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6344 reply_buf = kmalloc(size, GFP_KERNEL);
6345 if (!reply_buf)
6346 return ERR_PTR(-ENOMEM);
6347
54cac61f 6348 snapid = cpu_to_le64(snap_id);
6349 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6350 &rbd_dev->header_oloc, "get_snapshot_name",
6351 &snapid, sizeof(snapid), reply_buf, size);
36be9a76 6352 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6353 if (ret < 0) {
6354 snap_name = ERR_PTR(ret);
b8b1e2db 6355 goto out;
f40eb349 6356 }
6357
6358 p = reply_buf;
f40eb349 6359 end = reply_buf + ret;
e5c35534 6360 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
f40eb349 6361 if (IS_ERR(snap_name))
b8b1e2db 6362 goto out;
b8b1e2db 6363
f40eb349 6364 dout(" snap_id 0x%016llx snap_name = %s\n",
54cac61f 6365 (unsigned long long)snap_id, snap_name);
6366out:
6367 kfree(reply_buf);
6368
f40eb349 6369 return snap_name;
6370}
6371
2df3fac7 6372static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
117973fb 6373{
2df3fac7 6374 bool first_time = rbd_dev->header.object_prefix == NULL;
117973fb 6375 int ret;
117973fb 6376
6377 ret = rbd_dev_v2_image_size(rbd_dev);
6378 if (ret)
cfbf6377 6379 return ret;
1617e40c 6380
6381 if (first_time) {
6382 ret = rbd_dev_v2_header_onetime(rbd_dev);
6383 if (ret)
cfbf6377 6384 return ret;
6385 }
6386
cc4a38bd 6387 ret = rbd_dev_v2_snap_context(rbd_dev);
6388 if (ret && first_time) {
6389 kfree(rbd_dev->header.object_prefix);
6390 rbd_dev->header.object_prefix = NULL;
6391 }
6392
6393 return ret;
6394}
6395
6396static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6397{
6398 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6399
6400 if (rbd_dev->image_format == 1)
6401 return rbd_dev_v1_header_info(rbd_dev);
6402
6403 return rbd_dev_v2_header_info(rbd_dev);
6404}
6405
6406/*
6407 * Skips over white space at *buf, and updates *buf to point to the
6408 * first found non-space character (if any). Returns the length of
6409 * the token (string of non-white space characters) found. Note
6410 * that *buf must be terminated with '\0'.
6411 */
6412static inline size_t next_token(const char **buf)
6413{
6414 /*
6415 * These are the characters that produce nonzero for
6416 * isspace() in the "C" and "POSIX" locales.
6417 */
6418 const char *spaces = " \f\n\r\t\v";
6419
6420 *buf += strspn(*buf, spaces); /* Find start of token */
6421
6422 return strcspn(*buf, spaces); /* Return token length */
6423}
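/*
 * Example: with *buf pointing at "  rbd foo", next_token() advances
 * *buf to "rbd foo" and returns 3, the length of "rbd".
 */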
6424
6425/*
6426 * Finds the next token in *buf, dynamically allocates a buffer big
6427 * enough to hold a copy of it, and copies the token into the new
6428 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6429 * that a duplicate buffer is created even for a zero-length token.
6430 *
6431 * Returns a pointer to the newly-allocated duplicate, or a null
6432 * pointer if memory for the duplicate was not available. If
6433 * the lenp argument is a non-null pointer, the length of the token
6434 * (not including the '\0') is returned in *lenp.
6435 *
6436 * If successful, the *buf pointer will be updated to point beyond
6437 * the end of the found token.
6438 *
6439 * Note: uses GFP_KERNEL for allocation.
6440 */
6441static inline char *dup_token(const char **buf, size_t *lenp)
6442{
6443 char *dup;
6444 size_t len;
6445
6446 len = next_token(buf);
4caf35f9 6447 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6448 if (!dup)
6449 return NULL;
6450 *(dup + len) = '\0';
6451 *buf += len;
6452
6453 if (lenp)
6454 *lenp = len;
6455
6456 return dup;
6457}
6458
a725f65e 6459/*
6460 * Parse the options provided for an "rbd add" (i.e., rbd image
6461 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6462 * and the data written is passed here via a NUL-terminated buffer.
6463 * Returns 0 if successful or an error code otherwise.
d22f76e7 6464 *
6465 * The information extracted from these options is recorded in
6466 * the other parameters which return dynamically-allocated
6467 * structures:
6468 * ceph_opts
6469 * The address of a pointer that will refer to a ceph options
6470 * structure. Caller must release the returned pointer using
6471 * ceph_destroy_options() when it is no longer needed.
6472 * rbd_opts
6473 * Address of an rbd options pointer. Fully initialized by
6474 * this function; caller must release with kfree().
6475 * spec
6476 * Address of an rbd image specification pointer. Fully
6477 * initialized by this function based on parsed options.
6478 * Caller must release with rbd_spec_put().
6479 *
6480 * The options passed take this form:
6481 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
6482 * where:
6483 * <mon_addrs>
6484 * A comma-separated list of one or more monitor addresses.
6485 * A monitor address is an ip address, optionally followed
6486 * by a port number (separated by a colon).
6487 * I.e.: ip1[:port1][,ip2[:port2]...]
6488 * <options>
6489 * A comma-separated list of ceph and/or rbd options.
6490 * <pool_name>
6491 * The name of the rados pool containing the rbd image.
6492 * <image_name>
6493 * The name of the image in that pool to map.
6494 * <snap_id>
6495 * An optional snapshot id. If provided, the mapping will
6496 * present data from the image at the time that snapshot was
6497 * created. The image head is used if no snapshot id is
6498 * provided. Snapshot mappings are always read-only.
a725f65e 6499 */
859c31df 6500static int rbd_add_parse_args(const char *buf,
dc79b113 6501 struct ceph_options **ceph_opts,
6502 struct rbd_options **opts,
6503 struct rbd_spec **rbd_spec)
e28fff26 6504{
d22f76e7 6505 size_t len;
859c31df 6506 char *options;
0ddebc0c 6507 const char *mon_addrs;
ecb4dc22 6508 char *snap_name;
0ddebc0c 6509 size_t mon_addrs_size;
c300156b 6510 struct parse_rbd_opts_ctx pctx = { 0 };
859c31df 6511 struct ceph_options *copts;
dc79b113 6512 int ret;
6513
6514 /* The first four tokens are required */
6515
7ef3214a 6516 len = next_token(&buf);
6517 if (!len) {
6518 rbd_warn(NULL, "no monitor address(es) provided");
6519 return -EINVAL;
6520 }
0ddebc0c 6521 mon_addrs = buf;
f28e565a 6522 mon_addrs_size = len + 1;
7ef3214a 6523 buf += len;
a725f65e 6524
dc79b113 6525 ret = -EINVAL;
f28e565a
AE
6526 options = dup_token(&buf, NULL);
6527 if (!options)
dc79b113 6528 return -ENOMEM;
6529 if (!*options) {
6530 rbd_warn(NULL, "no options provided");
6531 goto out_err;
6532 }
e28fff26 6533
6534 pctx.spec = rbd_spec_alloc();
6535 if (!pctx.spec)
f28e565a 6536 goto out_mem;
859c31df 6537
6538 pctx.spec->pool_name = dup_token(&buf, NULL);
6539 if (!pctx.spec->pool_name)
859c31df 6540 goto out_mem;
c300156b 6541 if (!*pctx.spec->pool_name) {
6542 rbd_warn(NULL, "no pool name provided");
6543 goto out_err;
6544 }
e28fff26 6545
6546 pctx.spec->image_name = dup_token(&buf, NULL);
6547 if (!pctx.spec->image_name)
f28e565a 6548 goto out_mem;
c300156b 6549 if (!*pctx.spec->image_name) {
6550 rbd_warn(NULL, "no image name provided");
6551 goto out_err;
6552 }
d4b125e9 6553
6554 /*
6555 * Snapshot name is optional; default is to use "-"
6556 * (indicating the head/no snapshot).
6557 */
3feeb894 6558 len = next_token(&buf);
820a5f3e 6559 if (!len) {
6560 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6561 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
f28e565a 6562 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
dc79b113 6563 ret = -ENAMETOOLONG;
f28e565a 6564 goto out_err;
849b4260 6565 }
6566 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6567 if (!snap_name)
f28e565a 6568 goto out_mem;
ecb4dc22 6569 *(snap_name + len) = '\0';
c300156b 6570 pctx.spec->snap_name = snap_name;
e5c35534 6571
0ddebc0c 6572 /* Initialize all rbd options to the defaults */
e28fff26 6573
6574 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6575 if (!pctx.opts)
6576 goto out_mem;
6577
6578 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6579 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
0c93e1b7 6580 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6581 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6582 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6583 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6584 pctx.opts->trim = RBD_TRIM_DEFAULT;
d22f76e7 6585
859c31df 6586 copts = ceph_parse_options(options, mon_addrs,
6587 mon_addrs + mon_addrs_size - 1,
6588 parse_rbd_opts_token, &pctx);
6589 if (IS_ERR(copts)) {
6590 ret = PTR_ERR(copts);
6591 goto out_err;
6592 }
6593 kfree(options);
6594
6595 *ceph_opts = copts;
6596 *opts = pctx.opts;
6597 *rbd_spec = pctx.spec;
0ddebc0c 6598
dc79b113 6599 return 0;
f28e565a 6600out_mem:
dc79b113 6601 ret = -ENOMEM;
d22f76e7 6602out_err:
6603 kfree(pctx.opts);
6604 rbd_spec_put(pctx.spec);
f28e565a 6605 kfree(options);
d22f76e7 6606
dc79b113 6607 return ret;
6608}
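/*
 * A hypothetical add request matching the grammar documented above
 * (all values made up):
 *
 *	echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo snap1" \
 *		> /sys/bus/rbd/add
 */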
6609
6610static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6611{
6612 down_write(&rbd_dev->lock_rwsem);
6613 if (__rbd_is_lock_owner(rbd_dev))
e1fddc8f 6614 __rbd_release_lock(rbd_dev);
6615 up_write(&rbd_dev->lock_rwsem);
6616}
6617
6618/*
6619 * If the wait is interrupted, an error is returned even if the lock
6620 * was successfully acquired. rbd_dev_image_unlock() will release it
6621 * if needed.
6622 */
6623static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6624{
637cd060 6625 long ret;
2f18d466 6626
e010dd0a 6627 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6628 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6629 return 0;
6630
6631 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6632 return -EINVAL;
6633 }
6634
6635 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
6636 return 0;
6637
6638 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6639 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6640 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6641 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6642 if (ret > 0)
6643 ret = rbd_dev->acquire_err;
6644 else if (!ret)
6645 ret = -ETIMEDOUT;
6646
2f18d466 6647 if (ret) {
6648 rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
6649 return ret;
6650 }
6651
6652 /*
6653 * The lock may have been released by now, unless automatic lock
6654 * transitions are disabled.
6655 */
6656 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6657 return 0;
6658}
6659
6660/*
6661 * An rbd format 2 image has a unique identifier, distinct from the
6662 * name given to it by the user. Internally, that identifier is
6663 * what's used to specify the names of objects related to the image.
6664 *
6665 * A special "rbd id" object is used to map an rbd image name to its
6666 * id. If that object doesn't exist, then there is no v2 rbd image
6667 * with the supplied name.
6668 *
6669 * This function will record the given rbd_dev's image_id field if
6670 * it can be determined, and in that case will return 0. If any
6671 * errors occur a negative errno will be returned and the rbd_dev's
6672 * image_id field will be unchanged (and should be NULL).
6673 */
6674static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6675{
6676 int ret;
6677 size_t size;
ecd4a68a 6678 CEPH_DEFINE_OID_ONSTACK(oid);
589d30e0 6679 void *response;
c0fba368 6680 char *image_id;
2f82ee54 6681
6682 /*
6683 * When probing a parent image, the image id is already
6684 * known (and the image name likely is not). There's no
6685 * need to fetch the image id again in this case. We
6686 * do still need to set the image format though.
2c0d0a10 6687 */
6688 if (rbd_dev->spec->image_id) {
6689 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6690
2c0d0a10 6691 return 0;
c0fba368 6692 }
2c0d0a10 6693
6694 /*
6695 * First, see if the format 2 image id file exists, and if
6696 * so, get the image's persistent id from it.
6697 */
6698 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6699 rbd_dev->spec->image_name);
6700 if (ret)
6701 return ret;
6702
6703 dout("rbd id object name is %s\n", oid.name);
6704
6705 /* Response will be an encoded string, which includes a length */
6706 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6707 response = kzalloc(size, GFP_NOIO);
6708 if (!response) {
6709 ret = -ENOMEM;
6710 goto out;
6711 }
6712
6713 /* If it doesn't exist we'll assume it's a format 1 image */
6714
6715 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6716 "get_id", NULL, 0,
5435d206 6717 response, size);
36be9a76 6718 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6719 if (ret == -ENOENT) {
6720 image_id = kstrdup("", GFP_KERNEL);
6721 ret = image_id ? 0 : -ENOMEM;
6722 if (!ret)
6723 rbd_dev->image_format = 1;
7dd440c9 6724 } else if (ret >= 0) {
6725 void *p = response;
6726
6727 image_id = ceph_extract_encoded_string(&p, p + ret,
979ed480 6728 NULL, GFP_NOIO);
461f758a 6729 ret = PTR_ERR_OR_ZERO(image_id);
6730 if (!ret)
6731 rbd_dev->image_format = 2;
6732 }
6733
6734 if (!ret) {
6735 rbd_dev->spec->image_id = image_id;
6736 dout("image_id is %s\n", image_id);
6737 }
6738out:
6739 kfree(response);
ecd4a68a 6740 ceph_oid_destroy(&oid);
6741 return ret;
6742}
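/*
 * Example: for an image named "foo", the id object is "rbd_id.foo"
 * (assuming the usual RBD_ID_PREFIX from rbd_types.h), and the image
 * id it yields is then used to name all other objects of a format 2
 * image.
 */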
6743
6744/*
6745 * Undo whatever state changes are made by v1 or v2 header info
6746 * call.
6747 */
6748static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6749{
6750 struct rbd_image_header *header;
6751
e69b8d41 6752 rbd_dev_parent_put(rbd_dev);
22e8bd51 6753 rbd_object_map_free(rbd_dev);
da5ef6be 6754 rbd_dev_mapping_clear(rbd_dev);
6755
6756 /* Free dynamic fields from the header, then zero it out */
6757
6758 header = &rbd_dev->header;
812164f8 6759 ceph_put_snap_context(header->snapc);
6760 kfree(header->snap_sizes);
6761 kfree(header->snap_names);
6762 kfree(header->object_prefix);
6763 memset(header, 0, sizeof (*header));
6764}
6765
2df3fac7 6766static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
6767{
6768 int ret;
a30b71b9 6769
1e130199 6770 ret = rbd_dev_v2_object_prefix(rbd_dev);
57385b51 6771 if (ret)
6772 goto out_err;
6773
6774 /*
6775	 * Get and check the features for the image. Currently the
6776 * features are assumed to never change.
6777 */
b1b5402a 6778 ret = rbd_dev_v2_features(rbd_dev);
57385b51 6779 if (ret)
9d475de5 6780 goto out_err;
35d489f9 6781
6782 /* If the image supports fancy striping, get its parameters */
6783
6784 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
6785 ret = rbd_dev_v2_striping_info(rbd_dev);
6786 if (ret < 0)
6787 goto out_err;
6788 }
a30b71b9 6789
6790 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
6791 ret = rbd_dev_v2_data_pool(rbd_dev);
6792 if (ret)
6793 goto out_err;
6794 }
6795
263423f8 6796 rbd_init_layout(rbd_dev);
35152979 6797 return 0;
263423f8 6798
9d475de5 6799out_err:
642a2537 6800 rbd_dev->header.features = 0;
6801 kfree(rbd_dev->header.object_prefix);
6802 rbd_dev->header.object_prefix = NULL;
9d475de5 6803 return ret;
6804}
6805
6806/*
6807 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6808 * rbd_dev_image_probe() recursion depth, which means it's also the
6809 * length of the already discovered part of the parent chain.
6810 */
6811static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
83a06263 6812{
2f82ee54 6813 struct rbd_device *parent = NULL;
6814 int ret;
6815
6816 if (!rbd_dev->parent_spec)
6817 return 0;
124afba2 6818
6819 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6820 pr_info("parent chain is too long (%d)\n", depth);
6821 ret = -EINVAL;
6822 goto out_err;
6823 }
6824
1643dfa4 6825 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
6826 if (!parent) {
6827 ret = -ENOMEM;
124afba2 6828 goto out_err;
6829 }
6830
6831 /*
6832 * Images related by parent/child relationships always share
6833 * rbd_client and spec/parent_spec, so bump their refcounts.
6834 */
6835 __rbd_get_client(rbd_dev->rbd_client);
6836 rbd_spec_get(rbd_dev->parent_spec);
124afba2 6837
6d69bb53 6838 ret = rbd_dev_image_probe(parent, depth);
6839 if (ret < 0)
6840 goto out_err;
1f2c6651 6841
124afba2 6842 rbd_dev->parent = parent;
a2acd00e 6843 atomic_set(&rbd_dev->parent_ref, 1);
124afba2 6844 return 0;
1f2c6651 6845
124afba2 6846out_err:
1f2c6651 6847 rbd_dev_unparent(rbd_dev);
1761b229 6848 rbd_dev_destroy(parent);
6849 return ret;
6850}
6851
6852static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6853{
6854 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6855 rbd_free_disk(rbd_dev);
6856 if (!single_major)
6857 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6858}
6859
6860/*
6861 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6862 * upon return.
6863 */
200a6a8b 6864static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
124afba2 6865{
83a06263 6866 int ret;
d1cf5788 6867
9b60e70b 6868 /* Record our major and minor device numbers. */
83a06263 6869
6870 if (!single_major) {
6871 ret = register_blkdev(0, rbd_dev->name);
6872 if (ret < 0)
1643dfa4 6873 goto err_out_unlock;
6874
6875 rbd_dev->major = ret;
6876 rbd_dev->minor = 0;
6877 } else {
6878 rbd_dev->major = rbd_major;
6879 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6880 }
6881
6882 /* Set up the blkdev mapping. */
6883
6884 ret = rbd_init_disk(rbd_dev);
6885 if (ret)
6886 goto err_out_blkdev;
6887
f35a4dee 6888 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
9568c93e 6889 set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
f35a4dee 6890
5769ed0c 6891 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
f35a4dee 6892 if (ret)
da5ef6be 6893 goto err_out_disk;
83a06263 6894
129b79d4 6895 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
811c6688 6896 up_write(&rbd_dev->header_rwsem);
5769ed0c 6897 return 0;
2f82ee54 6898
6899err_out_disk:
6900 rbd_free_disk(rbd_dev);
6901err_out_blkdev:
6902 if (!single_major)
6903 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6904err_out_unlock:
6905 up_write(&rbd_dev->header_rwsem);
6906 return ret;
6907}
6908
6909static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6910{
6911 struct rbd_spec *spec = rbd_dev->spec;
c41d13a3 6912 int ret;
6913
6914 /* Record the header object name for this rbd image. */
6915
6916 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
332bb12d 6917 if (rbd_dev->image_format == 1)
6918 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6919 spec->image_name, RBD_SUFFIX);
332bb12d 6920 else
6921 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6922 RBD_HEADER_PREFIX, spec->image_id);
332bb12d 6923
c41d13a3 6924 return ret;
6925}
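/*
 * Examples, assuming the usual RBD_SUFFIX/RBD_HEADER_PREFIX
 * definitions from rbd_types.h: a format 1 image "foo" gets header
 * object "foo.rbd"; a format 2 image with id "1038e4b5d2c6" gets
 * "rbd_header.1038e4b5d2c6".
 */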
6926
6927static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6928{
6fd48b3b 6929 rbd_dev_unprobe(rbd_dev);
6930 if (rbd_dev->opts)
6931 rbd_unregister_watch(rbd_dev);
6932 rbd_dev->image_format = 0;
6933 kfree(rbd_dev->spec->image_id);
6934 rbd_dev->spec->image_id = NULL;
6935}
6936
6937/*
6938 * Probe for the existence of the header object for the given rbd
6939 * device. If this image is the one being mapped (i.e., not a
6940 * parent), initiate a watch on its header object before using that
6941 * object to get detailed information about the rbd image.
a30b71b9 6942 */
6d69bb53 6943static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6944{
6945 int ret;
6946
6947 /*
6948 * Get the id from the image id object. Unless there's an
6949 * error, rbd_dev->spec->image_id will be filled in with
6950 * a dynamically-allocated string, and rbd_dev->image_format
6951 * will be set to either 1 or 2.
6952 */
6953 ret = rbd_dev_image_id(rbd_dev);
6954 if (ret)
c0fba368 6955 return ret;
c0fba368 6956
6957 ret = rbd_dev_header_name(rbd_dev);
6958 if (ret)
6959 goto err_out_format;
6960
6d69bb53 6961 if (!depth) {
99d16943 6962 ret = rbd_register_watch(rbd_dev);
6963 if (ret) {
6964 if (ret == -ENOENT)
b26c047b 6965 pr_info("image %s/%s%s%s does not exist\n",
1fe48023 6966 rbd_dev->spec->pool_name,
6967 rbd_dev->spec->pool_ns ?: "",
6968 rbd_dev->spec->pool_ns ? "/" : "",
1fe48023 6969 rbd_dev->spec->image_name);
c41d13a3 6970 goto err_out_format;
1fe48023 6971 }
1f3ef788 6972 }
b644de2b 6973
a720ae09 6974 ret = rbd_dev_header_info(rbd_dev);
5655c4d9 6975 if (ret)
b644de2b 6976 goto err_out_watch;
83a06263 6977
6978 /*
6979 * If this image is the one being mapped, we have pool name and
6980 * id, image name and id, and snap name - need to fill snap id.
6981 * Otherwise this is a parent image, identified by pool, image
6982 * and snap ids - need to fill in names for those ids.
6983 */
6d69bb53 6984 if (!depth)
04077599
ID
6985 ret = rbd_spec_fill_snap_id(rbd_dev);
6986 else
6987 ret = rbd_spec_fill_names(rbd_dev);
1fe48023
ID
6988 if (ret) {
6989 if (ret == -ENOENT)
b26c047b 6990 pr_info("snap %s/%s%s%s@%s does not exist\n",
1fe48023 6991 rbd_dev->spec->pool_name,
b26c047b
ID
6992 rbd_dev->spec->pool_ns ?: "",
6993 rbd_dev->spec->pool_ns ? "/" : "",
1fe48023
ID
6994 rbd_dev->spec->image_name,
6995 rbd_dev->spec->snap_name);
33dca39f 6996 goto err_out_probe;
1fe48023 6997 }
9bb81c9b 6998
da5ef6be
ID
6999 ret = rbd_dev_mapping_set(rbd_dev);
7000 if (ret)
7001 goto err_out_probe;
7002
22e8bd51
ID
7003 if (rbd_dev->spec->snap_id != CEPH_NOSNAP &&
7004 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
7005 ret = rbd_object_map_load(rbd_dev);
7006 if (ret)
7007 goto err_out_probe;
7008 }
7009
e8f59b59
ID
7010 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
7011 ret = rbd_dev_v2_parent_info(rbd_dev);
7012 if (ret)
7013 goto err_out_probe;
e8f59b59
ID
7014 }
7015
6d69bb53 7016 ret = rbd_dev_probe_parent(rbd_dev, depth);
30d60ba2
AE
7017 if (ret)
7018 goto err_out_probe;
7019
7020 dout("discovered format %u image, header name is %s\n",
c41d13a3 7021 rbd_dev->image_format, rbd_dev->header_oid.name);
30d60ba2 7022 return 0;
e8f59b59 7023
6fd48b3b
AE
7024err_out_probe:
7025 rbd_dev_unprobe(rbd_dev);
b644de2b 7026err_out_watch:
6d69bb53 7027 if (!depth)
99d16943 7028 rbd_unregister_watch(rbd_dev);
332bb12d
AE
7029err_out_format:
7030 rbd_dev->image_format = 0;
5655c4d9
AE
7031 kfree(rbd_dev->spec->image_id);
7032 rbd_dev->spec->image_id = NULL;
a30b71b9
AE
7033 return ret;
7034}
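/*
 * A minimal sketch of the probe recursion for a layered image,
 * assuming rbd_dev_probe_parent() re-enters rbd_dev_image_probe()
 * with depth + 1 for each level of parent:
 *
 *   rbd_dev_image_probe(dev, 0)         mapped image: watch, snap name -> id
 *     rbd_dev_probe_parent(dev, 0)
 *       rbd_dev_image_probe(parent, 1)  parent: no watch, ids -> names
 *         ...
 *
 * with the chain expected to be capped at RBD_MAX_PARENT_CHAIN_LEN
 * levels.
 */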
7035
9b60e70b
ID
7036static ssize_t do_rbd_add(struct bus_type *bus,
7037 const char *buf,
7038 size_t count)
602adf40 7039{
cb8627c7 7040 struct rbd_device *rbd_dev = NULL;
dc79b113 7041 struct ceph_options *ceph_opts = NULL;
4e9afeba 7042 struct rbd_options *rbd_opts = NULL;
859c31df 7043 struct rbd_spec *spec = NULL;
9d3997fd 7044 struct rbd_client *rbdc;
b51c83c2 7045 int rc;
602adf40
YS
7046
7047 if (!try_module_get(THIS_MODULE))
7048 return -ENODEV;
7049
602adf40 7050 /* parse add command */
859c31df 7051 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
dc79b113 7052 if (rc < 0)
dd5ac32d 7053 goto out;
78cea76e 7054
9d3997fd
AE
7055 rbdc = rbd_get_client(ceph_opts);
7056 if (IS_ERR(rbdc)) {
7057 rc = PTR_ERR(rbdc);
0ddebc0c 7058 goto err_out_args;
9d3997fd 7059 }
602adf40 7060
602adf40 7061 /* pick the pool */
dd435855 7062 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
1fe48023
ID
7063 if (rc < 0) {
7064 if (rc == -ENOENT)
7065 pr_info("pool %s does not exist\n", spec->pool_name);
602adf40 7066 goto err_out_client;
1fe48023 7067 }
c0cd10db 7068 spec->pool_id = (u64)rc;
859c31df 7069
d147543d 7070 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
b51c83c2
ID
7071 if (!rbd_dev) {
7072 rc = -ENOMEM;
bd4ba655 7073 goto err_out_client;
b51c83c2 7074 }
c53d5893
AE
7075 rbdc = NULL; /* rbd_dev now owns this */
7076 spec = NULL; /* rbd_dev now owns this */
d147543d 7077 rbd_opts = NULL; /* rbd_dev now owns this */
602adf40 7078
0d6d1e9c
MC
7079 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7080 if (!rbd_dev->config_info) {
7081 rc = -ENOMEM;
7082 goto err_out_rbd_dev;
7083 }
7084
811c6688 7085 down_write(&rbd_dev->header_rwsem);
6d69bb53 7086 rc = rbd_dev_image_probe(rbd_dev, 0);
0d6d1e9c
MC
7087 if (rc < 0) {
7088 up_write(&rbd_dev->header_rwsem);
c53d5893 7089 goto err_out_rbd_dev;
0d6d1e9c 7090 }
05fd6f6f 7091
7ce4eef7 7092 /* If we are mapping a snapshot, it must be marked read-only. */
7ce4eef7 7093 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
9568c93e 7094 rbd_dev->opts->read_only = true;
7ce4eef7 7095
0c93e1b7
ID
7096 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7097 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7098 rbd_dev->layout.object_size);
7099 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7100 }
7101
b536f69a 7102 rc = rbd_dev_device_setup(rbd_dev);
fd22aef8 7103 if (rc)
8b679ec5 7104 goto err_out_image_probe;
3abef3b3 7105
637cd060
ID
7106 rc = rbd_add_acquire_lock(rbd_dev);
7107 if (rc)
7108 goto err_out_image_lock;
3abef3b3 7109
5769ed0c
ID
7110 /* Everything's ready. Announce the disk to the world. */
7111
7112 rc = device_add(&rbd_dev->dev);
7113 if (rc)
e010dd0a 7114 goto err_out_image_lock;
5769ed0c
ID
7115
7116 add_disk(rbd_dev->disk);
7117 /* see rbd_init_disk() */
7118 blk_put_queue(rbd_dev->disk->queue);
7119
7120 spin_lock(&rbd_dev_list_lock);
7121 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7122 spin_unlock(&rbd_dev_list_lock);
7123
7124 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7125 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7126 rbd_dev->header.features);
dd5ac32d
ID
7127 rc = count;
7128out:
7129 module_put(THIS_MODULE);
7130 return rc;
b536f69a 7131
e010dd0a
ID
7132err_out_image_lock:
7133 rbd_dev_image_unlock(rbd_dev);
5769ed0c 7134 rbd_dev_device_release(rbd_dev);
8b679ec5
ID
7135err_out_image_probe:
7136 rbd_dev_image_release(rbd_dev);
c53d5893
AE
7137err_out_rbd_dev:
7138 rbd_dev_destroy(rbd_dev);
bd4ba655 7139err_out_client:
9d3997fd 7140 rbd_put_client(rbdc);
0ddebc0c 7141err_out_args:
859c31df 7142 rbd_spec_put(spec);
d147543d 7143 kfree(rbd_opts);
dd5ac32d 7144 goto out;
602adf40
YS
7145}
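/*
 * Example usage, following Documentation/ABI/testing/sysfs-bus-rbd
 * (monitor address, credentials, pool and image names below are
 * placeholders):
 *
 *   # echo "1.2.3.4:6789 name=admin,secret=AQB... rbd foo -" \
 *       > /sys/bus/rbd/add
 *
 * The trailing "-" maps the image head rather than a named snapshot.
 * On success a /dev/rbd<id> node appears and the capacity/features
 * line above is printed to the kernel log.
 */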
7146
7e9586ba 7147static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
9b60e70b
ID
7148{
7149 if (single_major)
7150 return -EINVAL;
7151
7152 return do_rbd_add(bus, buf, count);
7153}
7154
7e9586ba
GKH
7155static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
7156 size_t count)
9b60e70b
ID
7157{
7158 return do_rbd_add(bus, buf, count);
7159}
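/*
 * When single_major is enabled the plain "add"/"remove" attributes
 * return -EINVAL and userspace is expected to use the *_single_major
 * variants instead, so that every mapping shares rbd_major and minors
 * are derived from the device id.
 */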
7160
05a46afd
AE
7161static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7162{
ad945fc1 7163 while (rbd_dev->parent) {
05a46afd
AE
7164 struct rbd_device *first = rbd_dev;
7165 struct rbd_device *second = first->parent;
7166 struct rbd_device *third;
7167
7168 /*
7169 * Walk down to the deepest parent (the one with no
7170 * grandparent) and remove it.
7171 */
7172 while (second && (third = second->parent)) {
7173 first = second;
7174 second = third;
7175 }
ad945fc1 7176 rbd_assert(second);
8ad42cd0 7177 rbd_dev_image_release(second);
8b679ec5 7178 rbd_dev_destroy(second);
ad945fc1
AE
7179 first->parent = NULL;
7180 first->parent_overlap = 0;
7181
7182 rbd_assert(first->parent_spec);
05a46afd
AE
7183 rbd_spec_put(first->parent_spec);
7184 first->parent_spec = NULL;
05a46afd
AE
7185 }
7186}
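/*
 * Teardown order for a chain mapped as dev -> p1 -> p2: each pass of
 * the outer loop re-walks from the top and releases the deepest
 * parent first (p2, then p1), so a parent is never destroyed while a
 * child still points at it.
 */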
7187
9b60e70b
ID
7188static ssize_t do_rbd_remove(struct bus_type *bus,
7189 const char *buf,
7190 size_t count)
602adf40
YS
7191{
7192 struct rbd_device *rbd_dev = NULL;
751cc0e3
AE
7193 struct list_head *tmp;
7194 int dev_id;
0276dca6 7195 char opt_buf[6];
0276dca6 7196 bool force = false;
0d8189e1 7197 int ret;
602adf40 7198
0276dca6
MC
7199 dev_id = -1;
7200 opt_buf[0] = '\0';
7201 sscanf(buf, "%d %5s", &dev_id, opt_buf);
7202 if (dev_id < 0) {
7203 pr_err("dev_id out of range\n");
602adf40 7204 return -EINVAL;
0276dca6
MC
7205 }
7206 if (opt_buf[0] != '\0') {
7207 if (!strcmp(opt_buf, "force")) {
7208 force = true;
7209 } else {
7210 pr_err("bad remove option at '%s'\n", opt_buf);
7211 return -EINVAL;
7212 }
7213 }
602adf40 7214
751cc0e3
AE
7215 ret = -ENOENT;
7216 spin_lock(&rbd_dev_list_lock);
7217 list_for_each(tmp, &rbd_dev_list) {
7218 rbd_dev = list_entry(tmp, struct rbd_device, node);
7219 if (rbd_dev->dev_id == dev_id) {
7220 ret = 0;
7221 break;
7222 }
42382b70 7223 }
751cc0e3
AE
7224 if (!ret) {
7225 spin_lock_irq(&rbd_dev->lock);
0276dca6 7226 if (rbd_dev->open_count && !force)
751cc0e3 7227 ret = -EBUSY;
85f5a4d6
ID
7228 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
7229 &rbd_dev->flags))
7230 ret = -EINPROGRESS;
751cc0e3
AE
7231 spin_unlock_irq(&rbd_dev->lock);
7232 }
7233 spin_unlock(&rbd_dev_list_lock);
85f5a4d6 7234 if (ret)
1ba0f1e7 7235 return ret;
751cc0e3 7236
0276dca6
MC
7237 if (force) {
7238 /*
7239 * Prevent new IO from being queued and wait for existing
7240 * IO to complete/fail.
7241 */
7242 blk_mq_freeze_queue(rbd_dev->disk->queue);
7243 blk_set_queue_dying(rbd_dev->disk->queue);
7244 }
7245
5769ed0c
ID
7246 del_gendisk(rbd_dev->disk);
7247 spin_lock(&rbd_dev_list_lock);
7248 list_del_init(&rbd_dev->node);
7249 spin_unlock(&rbd_dev_list_lock);
7250 device_del(&rbd_dev->dev);
fca27065 7251
e010dd0a 7252 rbd_dev_image_unlock(rbd_dev);
dd5ac32d 7253 rbd_dev_device_release(rbd_dev);
8ad42cd0 7254 rbd_dev_image_release(rbd_dev);
8b679ec5 7255 rbd_dev_destroy(rbd_dev);
1ba0f1e7 7256 return count;
602adf40
YS
7257}
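/*
 * Example usage (the device id 0 is a placeholder):
 *
 *   # echo 0 > /sys/bus/rbd/remove           EBUSY while the device is open
 *   # echo "0 force" > /sys/bus/rbd/remove   fail outstanding IO, then remove
 */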
7258
7e9586ba 7259static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
9b60e70b
ID
7260{
7261 if (single_major)
7262 return -EINVAL;
7263
7264 return do_rbd_remove(bus, buf, count);
7265}
7266
7e9586ba
GKH
7267static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
7268 size_t count)
9b60e70b
ID
7269{
7270 return do_rbd_remove(bus, buf, count);
7271}
7272
602adf40
YS
7273/*
7274 * create control files in sysfs
dfc5606d 7275 * /sys/bus/rbd/...
602adf40 7276 */
7d8dc534 7277static int __init rbd_sysfs_init(void)
602adf40 7278{
dfc5606d 7279 int ret;
602adf40 7280
fed4c143 7281 ret = device_register(&rbd_root_dev);
21079786 7282 if (ret < 0)
dfc5606d 7283 return ret;
602adf40 7284
fed4c143
AE
7285 ret = bus_register(&rbd_bus_type);
7286 if (ret < 0)
7287 device_unregister(&rbd_root_dev);
602adf40 7288
602adf40
YS
7289 return ret;
7290}
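/*
 * Note the unwind pairing: a bus_register() failure undoes the
 * earlier device_register(), and rbd_sysfs_cleanup() below reverses
 * both registrations in the opposite order.
 */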
7291
7d8dc534 7292static void __exit rbd_sysfs_cleanup(void)
602adf40 7293{
dfc5606d 7294 bus_unregister(&rbd_bus_type);
fed4c143 7295 device_unregister(&rbd_root_dev);
602adf40
YS
7296}
7297
7d8dc534 7298static int __init rbd_slab_init(void)
1c2a9dfe
AE
7299{
7300 rbd_assert(!rbd_img_request_cache);
03d94406 7301 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
868311b1
AE
7302 if (!rbd_img_request_cache)
7303 return -ENOMEM;
7304
7305 rbd_assert(!rbd_obj_request_cache);
03d94406 7306 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
78c2a44a
AE
7307 if (!rbd_obj_request_cache)
7308 goto out_err;
7309
6c696d85 7310 return 0;
1c2a9dfe 7311
6c696d85 7312out_err:
868311b1
AE
7313 kmem_cache_destroy(rbd_img_request_cache);
7314 rbd_img_request_cache = NULL;
1c2a9dfe
AE
7315 return -ENOMEM;
7316}
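/*
 * KMEM_CACHE(rbd_img_request, 0) above is shorthand for
 *
 *   kmem_cache_create("rbd_img_request",
 *                     sizeof(struct rbd_img_request),
 *                     __alignof__(struct rbd_img_request), 0, NULL);
 *
 * with the request allocation paths elsewhere in this file expected
 * to draw from these caches via kmem_cache_zalloc()/kmem_cache_free().
 */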
7317
7318static void rbd_slab_exit(void)
7319{
868311b1
AE
7320 rbd_assert(rbd_obj_request_cache);
7321 kmem_cache_destroy(rbd_obj_request_cache);
7322 rbd_obj_request_cache = NULL;
7323
1c2a9dfe
AE
7324 rbd_assert(rbd_img_request_cache);
7325 kmem_cache_destroy(rbd_img_request_cache);
7326 rbd_img_request_cache = NULL;
7327}
7328
cc344fa1 7329static int __init rbd_init(void)
602adf40
YS
7330{
7331 int rc;
7332
1e32d34c
AE
7333 if (!libceph_compatible(NULL)) {
7334 rbd_warn(NULL, "libceph incompatibility (quitting)");
1e32d34c
AE
7335 return -EINVAL;
7336 }
e1b4d96d 7337
1c2a9dfe 7338 rc = rbd_slab_init();
602adf40
YS
7339 if (rc)
7340 return rc;
e1b4d96d 7341
f5ee37bd
ID
7342 /*
7343 * The number of active work items is limited by the number of
f77303bd 7344 * rbd devices * queue depth, so leave @max_active at default.
f5ee37bd
ID
7345 */
7346 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
7347 if (!rbd_wq) {
7348 rc = -ENOMEM;
7349 goto err_out_slab;
7350 }
7351
9b60e70b
ID
7352 if (single_major) {
7353 rbd_major = register_blkdev(0, RBD_DRV_NAME);
7354 if (rbd_major < 0) {
7355 rc = rbd_major;
f5ee37bd 7356 goto err_out_wq;
9b60e70b
ID
7357 }
7358 }
7359
1c2a9dfe
AE
7360 rc = rbd_sysfs_init();
7361 if (rc)
9b60e70b
ID
7362 goto err_out_blkdev;
7363
7364 if (single_major)
7365 pr_info("loaded (major %d)\n", rbd_major);
7366 else
7367 pr_info("loaded\n");
1c2a9dfe 7368
e1b4d96d
ID
7369 return 0;
7370
9b60e70b
ID
7371err_out_blkdev:
7372 if (single_major)
7373 unregister_blkdev(rbd_major, RBD_DRV_NAME);
f5ee37bd
ID
7374err_out_wq:
7375 destroy_workqueue(rbd_wq);
e1b4d96d
ID
7376err_out_slab:
7377 rbd_slab_exit();
1c2a9dfe 7378 return rc;
602adf40
YS
7379}
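/*
 * Example module load (a sketch; single_major is assumed to be the
 * module parameter consulted above, and the major number is whatever
 * register_blkdev() assigns):
 *
 *   # modprobe rbd single_major=1
 *
 * after which "loaded (major <N>)" is logged and images are mapped
 * via /sys/bus/rbd/add_single_major.
 */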
7380
cc344fa1 7381static void __exit rbd_exit(void)
602adf40 7382{
ffe312cf 7383 ida_destroy(&rbd_dev_id_ida);
602adf40 7384 rbd_sysfs_cleanup();
9b60e70b
ID
7385 if (single_major)
7386 unregister_blkdev(rbd_major, RBD_DRV_NAME);
f5ee37bd 7387 destroy_workqueue(rbd_wq);
1c2a9dfe 7388 rbd_slab_exit();
602adf40
YS
7389}
7390
7391module_init(rbd_init);
7392module_exit(rbd_exit);
7393
d552c619 7394MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
602adf40
YS
7395MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
7396MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
602adf40
YS
7397/* following authorship retained from original osdblk.c */
7398MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
7399
90da258b 7400MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
602adf40 7401MODULE_LICENSE("GPL");