#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices".  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
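/*
 * Illustrative sketch (not part of the original file): the canonical
 * locking pattern these helpers verify.  Map-wide state is protected by
 * osdc->lock (a rw_semaphore); per-session state by osd->lock.  A
 * submitter typically holds osdc->lock for read plus the osd mutex:
 *
 *	down_read(&osdc->lock);
 *	mutex_lock(&osd->lock);
 *	...			// verify_osd_locked() passes here
 *	mutex_unlock(&osd->lock);
 *	up_read(&osdc->lock);
 *
 * Holding osdc->lock for write alone also satisfies verify_osd_locked(),
 * per the WARN_ON() condition above.
 */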
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
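/*
 * Usage sketch (illustrative, not from the original file): to receive
 * the payload of a read op into a page vector:
 *
 *	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *					 false, true);
 *
 * Passing own_pages == true makes ceph_osd_data_release() free the
 * vector when the request is destroyed.
 */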
static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
				unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	default:
		break;
	}
}
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);
static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
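/*
 * Usage sketch (illustrative, not from the original file; "pool_id" and
 * the object name are made up): a caller that doesn't go through
 * ceph_osdc_new_request() allocates a request, fills in the ops and the
 * target, and then allocates the messages:
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
 *	if (!req)
 *		return -ENOMEM;
 *	osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 *	ceph_oid_printf(&req->r_base_oid, "%s", "some_object");
 *	req->r_base_oloc.pool = pool_id;
 *	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
 */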
static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
	msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 1 + 8 + 4 + 4; /* pgid */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4; /* retry_attempt */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
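/*
 * Usage sketch (illustrative; the class/method names and buffers are
 * made up): invoke an object class method with input and output pages:
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "myclass", "mymethod");
 *	osd_req_op_cls_request_data_pages(req, 0, in_pages, in_len, 0,
 *					  false, false);
 *	osd_req_op_cls_response_data_pages(req, 0, out_pages, out_len, 0,
 *					   false, false);
 */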
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_abort_on_full = true;
	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
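/*
 * Usage sketch (illustrative, not from the original file): read 64K of
 * file data; "layout", "vino" and result handling are the caller's:
 *
 *	u64 len = 65536;
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, 0, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 * On return, len may have been shortened to the object boundary (see
 * calc_layout()).
 */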
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
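/*
 * Note (illustrative): DEFINE_RB_FUNCS(request, ...) generates
 * insert_request(), erase_request() and lookup_request() keyed by
 * r_tid, e.g.:
 *
 *	struct ceph_osd_request *req =
 *	    lookup_request(&osd->o_requests, tid);
 */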
static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}
/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}
/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}
static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->base_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}
enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool need_check_tiering = false;
	bool need_resend = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	enum calc_target_result ct_res;
	int ret;

	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}
	if (ceph_oid_empty(&t->target_oid) || force_resend) {
		ceph_oid_copy(&t->target_oid, &t->base_oid);
		need_check_tiering = true;
	}
	if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
		ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
		need_check_tiering = true;
	}

	if (need_check_tiering &&
	    (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;
	}

	ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
					&t->target_oloc, &pgid);
	if (ret) {
		WARN_ON(ret != -ENOENT);
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		need_resend = true;
	}

	if (ceph_pg_compare(&t->pgid, &pgid) ||
	    ceph_osds_changed(&t->acting, &acting, any_change) ||
	    force_resend) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->osd = acting.primary;
		need_resend = true;
	}

	ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
out:
	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
	return ct_res;
}
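/*
 * Note (illustrative): callers react to the result, e.g. in
 * __submit_request() below:
 *
 *	ct_res = calc_target(osdc, &req->r_t, false);
 *	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
 *		goto promote;	// retake osdc->lock for write
 *
 * CALC_TARGET_NEED_RESEND means the mapping changed and the request
 * must be (re)sent; CALC_TARGET_POOL_DNE triggers a map check against
 * the monitors (send_map_check()).
 */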
static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}
static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req, msg);

	ceph_encode_32(&p, 1); /* client_inc, always 1 */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);
	ceph_encode_timespec(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);

	/* reassert_version */
	memset(p, 0, sizeof(struct ceph_eversion));
	p += sizeof(struct ceph_eversion);

	/* oloc */
	ceph_start_encoding(&p, 5, 4,
			    ceph_oloc_encoding_size(&req->r_t.target_oloc));
	ceph_encode_64(&p, req->r_t.target_oloc.pool);
	ceph_encode_32(&p, -1); /* preferred */
	ceph_encode_32(&p, 0); /* key len */
	if (req->r_t.target_oloc.pool_ns)
		ceph_encode_string(&p, end, req->r_t.target_oloc.pool_ns->str,
				   req->r_t.target_oloc.pool_ns->len);
	else
		ceph_encode_32(&p, 0);

	/* pgid */
	ceph_encode_8(&p, 1);
	ceph_encode_64(&p, req->r_t.pgid.pool);
	ceph_encode_32(&p, req->r_t.pgid.seed);
	ceph_encode_32(&p, -1); /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_t.target_oid.name_len);
	memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
	p += req->r_t.target_oid.name_len;

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
	     req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
	     msg->front.iov_len, data_len);
}
/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request(req, req->r_request);

	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
	     req->r_t.spgid.shard, osd->o_osd, req->r_flags, req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}
static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}

static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);
static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd;
	enum calc_target_result ct_res;
	bool need_send = false;
	bool promoted = false;
	bool need_abort = false;

	WARN_ON(req->r_tid);
	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
	ct_res = calc_target(osdc, &req->r_t, false);
	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
		goto promote;

	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
	if (IS_ERR(osd)) {
		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
		goto promote;
	}

	if (osdc->osdmap->epoch < osdc->epoch_barrier) {
		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
		     osdc->epoch_barrier);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("req %p pausewr\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("req %p pauserd\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
				     CEPH_OSD_FLAG_FULL_FORCE)) &&
		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		    pool_full(osdc, req->r_t.base_oloc.pool))) {
		dout("req %p full/pool_full\n", req);
		pr_warn_ratelimited("FULL or reached pool quota\n");
		req->r_t.paused = true;
		maybe_request_map(osdc);
		if (req->r_abort_on_full)
			need_abort = true;
	} else if (!osd_homeless(osd)) {
		need_send = true;
	} else {
		maybe_request_map(osdc);
	}

	mutex_lock(&osd->lock);
	/*
	 * Assign the tid atomically with send_request() to protect
	 * multiple writes to the same object from racing with each
	 * other, resulting in out of order ops on the OSDs.
	 */
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(osd, req);
	if (need_send)
		send_request(req);
	else if (need_abort)
		complete_request(req, -ENOSPC);
	mutex_unlock(&osd->lock);

	if (ct_res == CALC_TARGET_POOL_DNE)
		send_map_check(req);

	if (promoted)
		downgrade_write(&osdc->lock);
	return;

promote:
	up_read(&osdc->lock);
	down_write(&osdc->lock);
	wrlocked = true;
	promoted = true;
	goto again;
}
static void account_request(struct ceph_osd_request *req)
{
	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));

	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
	atomic_inc(&req->r_osdc->num_requests);

	req->r_start_stamp = jiffies;
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);
	__submit_request(req, wrlocked);
}

static void finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
	unlink_request(osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}

static void __complete_request(struct ceph_osd_request *req)
{
	if (req->r_callback) {
		dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
		     req->r_tid, req->r_callback, req->r_result);
		req->r_callback(req);
	}
}

/*
 * This is open-coded in handle_reply().
 */
static void complete_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	req->r_result = err;
	finish_request(req);
	__complete_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}
static void cancel_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (!lookup_req)
		return;

	WARN_ON(lookup_req != req);
	erase_request_mc(&osdc->map_checks, req);
	ceph_osdc_put_request(req);
}

static void cancel_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	cancel_map_check(req);
	finish_request(req);
	complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}

static void abort_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	cancel_map_check(req);
	complete_request(req, err);
}
static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	if (likely(eb > osdc->epoch_barrier)) {
		dout("updating epoch_barrier from %u to %u\n",
		     osdc->epoch_barrier, eb);
		osdc->epoch_barrier = eb;
		/* Request map if we're not to the barrier yet */
		if (eb > osdc->osdmap->epoch)
			maybe_request_map(osdc);
	}
}

void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	down_read(&osdc->lock);
	if (unlikely(eb > osdc->epoch_barrier)) {
		up_read(&osdc->lock);
		down_write(&osdc->lock);
		update_epoch_barrier(osdc, eb);
		up_write(&osdc->lock);
	} else {
		up_read(&osdc->lock);
	}
}
EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
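/*
 * Usage note (illustrative): a client that learns of a map epoch out of
 * band (e.g. from an MDS reply) can raise the barrier so that no
 * requests are sent until that epoch has been received:
 *
 *	ceph_osdc_update_epoch_barrier(osdc, reported_epoch);
 *
 * __submit_request() pauses any request while
 * osdc->osdmap->epoch < osdc->epoch_barrier.
 */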
/*
 * Drop all pending requests that are stalled waiting on a full condition to
 * clear, and complete them with ENOSPC as the return code. Set the
 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
 * cancelled.
 */
static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;
	bool victims = false;

	dout("enter abort_on_full\n");

	if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
		goto out;

	/* Scan list and see if there is anything to abort */
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		struct rb_node *m;

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);
			m = rb_next(m);

			if (req->r_abort_on_full) {
				victims = true;
				break;
			}
		}
		if (victims)
			break;
	}

	if (!victims)
		goto out;

	/*
	 * Update the barrier to current epoch if it's behind that point,
	 * since we know we have some calls to be aborted in the tree.
	 */
	update_epoch_barrier(osdc, osdc->osdmap->epoch);

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		struct rb_node *m;

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);
			m = rb_next(m);

			if (req->r_abort_on_full &&
			    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
			     pool_full(osdc, req->r_t.target_oloc.pool)))
				abort_request(req, -ENOSPC);
		}
	}
out:
	dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
}
static void check_pool_dne(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (req->r_attempts) {
		/*
		 * We sent a request earlier, which means that
		 * previously the pool existed, and now it does not
		 * (i.e., it was deleted).
		 */
		req->r_map_dne_bound = map->epoch;
		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
		     req->r_tid);
	} else {
		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
	}

	if (req->r_map_dne_bound) {
		if (map->epoch >= req->r_map_dne_bound) {
			/* we had a new enough map */
			pr_info_ratelimited("tid %llu pool does not exist\n",
					    req->r_tid);
			complete_request(req, -ENOENT);
		}
	} else {
		send_map_check(req);
	}
}

static void map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_request *req;
	u64 tid = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	req = lookup_request_mc(&osdc->map_checks, tid);
	if (!req) {
		dout("%s tid %llu dne\n", __func__, tid);
		goto out_unlock;
	}

	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
	if (!req->r_map_dne_bound)
		req->r_map_dne_bound = greq->u.newest;
	erase_request_mc(&osdc->map_checks, req);
	check_pool_dne(req);

	ceph_osdc_put_request(req);
out_unlock:
	up_write(&osdc->lock);
}

static void send_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		return;
	}

	ceph_osdc_get_request(req);
	insert_request_mc(&osdc->map_checks, req);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  map_check_cb, req->r_tid);
	WARN_ON(ret);
}
/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{
	struct ceph_osd_linger_request *lreq =
	    container_of(kref, struct ceph_osd_linger_request, kref);

	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
	     lreq->reg_req, lreq->ping_req);
	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
	WARN_ON(!list_empty(&lreq->scan_item));
	WARN_ON(!list_empty(&lreq->pending_lworks));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}

static void linger_put(struct ceph_osd_linger_request *lreq)
{
	if (lreq)
		kref_put(&lreq->kref, linger_release);
}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
	kref_get(&lreq->kref);
	return lreq;
}

static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
	if (!lreq)
		return NULL;

	kref_init(&lreq->kref);
	mutex_init(&lreq->lock);
	RB_CLEAR_NODE(&lreq->node);
	RB_CLEAR_NODE(&lreq->osdc_node);
	RB_CLEAR_NODE(&lreq->mc_node);
	INIT_LIST_HEAD(&lreq->scan_item);
	INIT_LIST_HEAD(&lreq->pending_lworks);
	init_completion(&lreq->reg_commit_wait);
	init_completion(&lreq->notify_finish_wait);

	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}

DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(!lreq->linger_id || lreq->osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}

static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(lreq->osd != osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	lreq->osd = NULL;
	erase_linger(&osd->o_linger_requests, lreq);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
	verify_osdc_locked(lreq->osdc);

	return !RB_EMPTY_NODE(&lreq->osdc_node);
}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	bool registered;

	down_read(&osdc->lock);
	registered = __linger_registered(lreq);
	up_read(&osdc->lock);

	return registered;
}

static void linger_register(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);
	WARN_ON(lreq->linger_id);

	linger_get(lreq);
	lreq->linger_id = ++osdc->last_linger_id;
	insert_linger_osdc(&osdc->linger_requests, lreq);
}

static void linger_unregister(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);

	erase_linger_osdc(&osdc->linger_requests, lreq);
	linger_put(lreq);
}

static void cancel_linger_request(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	WARN_ON(!req->r_linger);
	cancel_request(req);
	linger_put(lreq);
}
2151 struct linger_work {
2152 struct work_struct work;
2153 struct ceph_osd_linger_request *lreq;
2154 struct list_head pending_item;
2155 unsigned long queued_stamp;
2161 void *payload; /* points into @msg front */
2164 struct ceph_msg *msg; /* for ceph_msg_put() */
2172 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2175 struct linger_work *lwork;
2177 lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2181 INIT_WORK(&lwork->work, workfn);
2182 INIT_LIST_HEAD(&lwork->pending_item);
2183 lwork->lreq = linger_get(lreq);
2188 static void lwork_free(struct linger_work *lwork)
2190 struct ceph_osd_linger_request *lreq = lwork->lreq;
2192 mutex_lock(&lreq->lock);
2193 list_del(&lwork->pending_item);
2194 mutex_unlock(&lreq->lock);
2200 static void lwork_queue(struct linger_work *lwork)
2202 struct ceph_osd_linger_request *lreq = lwork->lreq;
2203 struct ceph_osd_client *osdc = lreq->osdc;
2205 verify_lreq_locked(lreq);
2206 WARN_ON(!list_empty(&lwork->pending_item));
2208 lwork->queued_stamp = jiffies;
2209 list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2210 queue_work(osdc->notify_wq, &lwork->work);
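/*
 * Typical lwork life cycle, as a minimal sketch (the "fill notify
 * fields" line stands in for the event-specific assignments):
 *
 *	lwork = lwork_alloc(lreq, do_watch_notify);
 *	if (!lwork)
 *		return;			// event is dropped
 *	// ... fill notify fields ...
 *	mutex_lock(&lreq->lock);
 *	lwork_queue(lwork);
 *	mutex_unlock(&lreq->lock);
 *
 * The work function runs on osdc->notify_wq and ends with
 * lwork_free(lwork), which unlinks the lwork and drops the lreq
 * reference taken in lwork_alloc().
 */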
2213 static void do_watch_notify(struct work_struct *w)
2215 struct linger_work *lwork = container_of(w, struct linger_work, work);
2216 struct ceph_osd_linger_request *lreq = lwork->lreq;
2218 if (!linger_registered(lreq)) {
2219 dout("%s lreq %p not registered\n", __func__, lreq);
2223 WARN_ON(!lreq->is_watch);
2224 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2225 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2226 lwork->notify.payload_len);
2227 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2228 lwork->notify.notifier_id, lwork->notify.payload,
2229 lwork->notify.payload_len);
2232 ceph_msg_put(lwork->notify.msg);
2236 static void do_watch_error(struct work_struct *w)
2238 struct linger_work *lwork = container_of(w, struct linger_work, work);
2239 struct ceph_osd_linger_request *lreq = lwork->lreq;
2241 if (!linger_registered(lreq)) {
2242 dout("%s lreq %p not registered\n", __func__, lreq);
2246 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2247 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2253 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2255 struct linger_work *lwork;
2257 lwork = lwork_alloc(lreq, do_watch_error);
2259 pr_err("failed to allocate error-lwork\n");
2263 lwork->error.err = lreq->last_error;
2267 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2270 if (!completion_done(&lreq->reg_commit_wait)) {
2271 lreq->reg_commit_error = (result <= 0 ? result : 0);
2272 complete_all(&lreq->reg_commit_wait);
2276 static void linger_commit_cb(struct ceph_osd_request *req)
2278 struct ceph_osd_linger_request *lreq = req->r_priv;
2280 mutex_lock(&lreq->lock);
2281 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2282 lreq->linger_id, req->r_result);
2283 linger_reg_commit_complete(lreq, req->r_result);
2284 lreq->committed = true;
2286 if (!lreq->is_watch) {
2287 struct ceph_osd_data *osd_data =
2288 osd_req_op_data(req, 0, notify, response_data);
2289 void *p = page_address(osd_data->pages[0]);
2291 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2292 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2294 /* make note of the notify_id */
2295 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2296 lreq->notify_id = ceph_decode_64(&p);
2297 dout("lreq %p notify_id %llu\n", lreq,
2300 dout("lreq %p no notify_id\n", lreq);
2304 mutex_unlock(&lreq->lock);
static int normalize_watch_error(int err)
{
	/*
	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
	 * notification and a failure to reconnect because we raced with
	 * the delete appear the same to the user.
	 */
	if (err == -ENOENT)
		err = -ENOTCONN;

	return err;
}
2321 static void linger_reconnect_cb(struct ceph_osd_request *req)
2323 struct ceph_osd_linger_request *lreq = req->r_priv;
2325 mutex_lock(&lreq->lock);
2326 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2327 lreq, lreq->linger_id, req->r_result, lreq->last_error);
2328 if (req->r_result < 0) {
2329 if (!lreq->last_error) {
2330 lreq->last_error = normalize_watch_error(req->r_result);
2331 queue_watch_error(lreq);
2335 mutex_unlock(&lreq->lock);
2339 static void send_linger(struct ceph_osd_linger_request *lreq)
2341 struct ceph_osd_request *req = lreq->reg_req;
2342 struct ceph_osd_req_op *op = &req->r_ops[0];
2344 verify_osdc_wrlocked(req->r_osdc);
2345 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2348 cancel_linger_request(req);
2350 request_reinit(req);
2351 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2352 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2353 req->r_flags = lreq->t.flags;
2354 req->r_mtime = lreq->mtime;
2356 mutex_lock(&lreq->lock);
2357 if (lreq->is_watch && lreq->committed) {
2358 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2359 op->watch.cookie != lreq->linger_id);
2360 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2361 op->watch.gen = ++lreq->register_gen;
2362 dout("lreq %p reconnect register_gen %u\n", lreq,
2364 req->r_callback = linger_reconnect_cb;
2366 if (!lreq->is_watch)
2367 lreq->notify_id = 0;
2369 WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2370 dout("lreq %p register\n", lreq);
2371 req->r_callback = linger_commit_cb;
2373 mutex_unlock(&lreq->lock);
2375 req->r_priv = linger_get(lreq);
2376 req->r_linger = true;
2378 submit_request(req, true);
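/*
 * To summarize the two paths above (a sketch, not normative): the
 * first registration sends CEPH_OSD_WATCH_OP_WATCH and completes via
 * linger_commit_cb(); once committed, resends reuse the same op as
 * CEPH_OSD_WATCH_OP_RECONNECT with a bumped register_gen and complete
 * via linger_reconnect_cb():
 *
 *	register:   op->watch.op = CEPH_OSD_WATCH_OP_WATCH;
 *	            req->r_callback = linger_commit_cb;
 *	reconnect:  op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
 *	            op->watch.gen = ++lreq->register_gen;
 *	            req->r_callback = linger_reconnect_cb;
 */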
2381 static void linger_ping_cb(struct ceph_osd_request *req)
2383 struct ceph_osd_linger_request *lreq = req->r_priv;
2385 mutex_lock(&lreq->lock);
2386 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2387 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2389 if (lreq->register_gen == req->r_ops[0].watch.gen) {
2390 if (!req->r_result) {
2391 lreq->watch_valid_thru = lreq->ping_sent;
2392 } else if (!lreq->last_error) {
2393 lreq->last_error = normalize_watch_error(req->r_result);
2394 queue_watch_error(lreq);
2397 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2398 lreq->register_gen, req->r_ops[0].watch.gen);
2401 mutex_unlock(&lreq->lock);
2405 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2407 struct ceph_osd_client *osdc = lreq->osdc;
2408 struct ceph_osd_request *req = lreq->ping_req;
2409 struct ceph_osd_req_op *op = &req->r_ops[0];
2411 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2412 dout("%s PAUSERD\n", __func__);
2416 lreq->ping_sent = jiffies;
2417 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2418 __func__, lreq, lreq->linger_id, lreq->ping_sent,
2419 lreq->register_gen);
2422 cancel_linger_request(req);
2424 request_reinit(req);
2425 target_copy(&req->r_t, &lreq->t);
2427 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2428 op->watch.cookie != lreq->linger_id ||
2429 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2430 op->watch.gen = lreq->register_gen;
2431 req->r_callback = linger_ping_cb;
2432 req->r_priv = linger_get(lreq);
2433 req->r_linger = true;
2435 ceph_osdc_get_request(req);
2436 account_request(req);
2437 req->r_tid = atomic64_inc_return(&osdc->last_tid);
2438 link_request(lreq->osd, req);
2442 static void linger_submit(struct ceph_osd_linger_request *lreq)
2444 struct ceph_osd_client *osdc = lreq->osdc;
2445 struct ceph_osd *osd;
2447 calc_target(osdc, &lreq->t, false);
2448 osd = lookup_create_osd(osdc, lreq->t.osd, true);
2449 link_linger(osd, lreq);
2454 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2456 struct ceph_osd_client *osdc = lreq->osdc;
2457 struct ceph_osd_linger_request *lookup_lreq;
2459 verify_osdc_wrlocked(osdc);
2461 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2466 WARN_ON(lookup_lreq != lreq);
2467 erase_linger_mc(&osdc->linger_map_checks, lreq);
2472 * @lreq has to be both registered and linked.
2474 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2476 if (lreq->is_watch && lreq->ping_req->r_osd)
2477 cancel_linger_request(lreq->ping_req);
2478 if (lreq->reg_req->r_osd)
2479 cancel_linger_request(lreq->reg_req);
2480 cancel_linger_map_check(lreq);
2481 unlink_linger(lreq->osd, lreq);
2482 linger_unregister(lreq);
2485 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2487 struct ceph_osd_client *osdc = lreq->osdc;
2489 down_write(&osdc->lock);
2490 if (__linger_registered(lreq))
2491 __linger_cancel(lreq);
2492 up_write(&osdc->lock);
2495 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2497 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2499 struct ceph_osd_client *osdc = lreq->osdc;
2500 struct ceph_osdmap *map = osdc->osdmap;
2502 verify_osdc_wrlocked(osdc);
2503 WARN_ON(!map->epoch);
2505 if (lreq->register_gen) {
2506 lreq->map_dne_bound = map->epoch;
2507 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2508 lreq, lreq->linger_id);
2510 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2511 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2515 if (lreq->map_dne_bound) {
2516 if (map->epoch >= lreq->map_dne_bound) {
2517 /* we had a new enough map */
2518 pr_info("linger_id %llu pool does not exist\n",
2520 linger_reg_commit_complete(lreq, -ENOENT);
2521 __linger_cancel(lreq);
2524 send_linger_map_check(lreq);
2528 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2530 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2531 struct ceph_osd_linger_request *lreq;
2532 u64 linger_id = greq->private_data;
2534 WARN_ON(greq->result || !greq->u.newest);
2536 down_write(&osdc->lock);
2537 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
2539 dout("%s linger_id %llu dne\n", __func__, linger_id);
2543 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
2544 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2546 if (!lreq->map_dne_bound)
2547 lreq->map_dne_bound = greq->u.newest;
2548 erase_linger_mc(&osdc->linger_map_checks, lreq);
2549 check_linger_pool_dne(lreq);
2553 up_write(&osdc->lock);
2556 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
2558 struct ceph_osd_client *osdc = lreq->osdc;
2559 struct ceph_osd_linger_request *lookup_lreq;
2562 verify_osdc_wrlocked(osdc);
2564 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2567 WARN_ON(lookup_lreq != lreq);
2572 insert_linger_mc(&osdc->linger_map_checks, lreq);
2573 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2574 linger_map_check_cb, lreq->linger_id);
2578 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
2582 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2583 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
2584 return ret ?: lreq->reg_commit_error;
2587 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
2591 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2592 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
2593 return ret ?: lreq->notify_finish_error;
/*
 * Timeout callback, called every N seconds.  When one or more OSD
 * requests have been active for more than N seconds, we send a
 * keepalive (tag + timestamp) to the OSD serving them so that any
 * communications channel reset is detected.
 */
2602 static void handle_timeout(struct work_struct *work)
2604 struct ceph_osd_client *osdc =
2605 container_of(work, struct ceph_osd_client, timeout_work.work);
2606 struct ceph_options *opts = osdc->client->options;
2607 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
2608 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
2609 LIST_HEAD(slow_osds);
2610 struct rb_node *n, *p;
2612 dout("%s osdc %p\n", __func__, osdc);
2613 down_write(&osdc->lock);
2616 * ping osds that are a bit slow. this ensures that if there
2617 * is a break in the TCP connection we will notice, and reopen
2618 * a connection with that osd (from the fault callback).
2620 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2621 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2624 for (p = rb_first(&osd->o_requests); p; ) {
2625 struct ceph_osd_request *req =
2626 rb_entry(p, struct ceph_osd_request, r_node);
2628 p = rb_next(p); /* abort_request() */
2630 if (time_before(req->r_stamp, cutoff)) {
2631 dout(" req %p tid %llu on osd%d is laggy\n",
2632 req, req->r_tid, osd->o_osd);
2635 if (opts->osd_request_timeout &&
2636 time_before(req->r_start_stamp, expiry_cutoff)) {
2637 pr_err_ratelimited("tid %llu on osd%d timeout\n",
2638 req->r_tid, osd->o_osd);
2639 abort_request(req, -ETIMEDOUT);
2642 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
2643 struct ceph_osd_linger_request *lreq =
2644 rb_entry(p, struct ceph_osd_linger_request, node);
2646 dout(" lreq %p linger_id %llu is served by osd%d\n",
2647 lreq, lreq->linger_id, osd->o_osd);
2650 mutex_lock(&lreq->lock);
2651 if (lreq->is_watch && lreq->committed && !lreq->last_error)
2652 send_linger_ping(lreq);
2653 mutex_unlock(&lreq->lock);
2657 list_move_tail(&osd->o_keepalive_item, &slow_osds);
2660 if (opts->osd_request_timeout) {
2661 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
2662 struct ceph_osd_request *req =
2663 rb_entry(p, struct ceph_osd_request, r_node);
2665 p = rb_next(p); /* abort_request() */
2667 if (time_before(req->r_start_stamp, expiry_cutoff)) {
2668 pr_err_ratelimited("tid %llu on osd%d timeout\n",
2669 req->r_tid, osdc->homeless_osd.o_osd);
2670 abort_request(req, -ETIMEDOUT);
2675 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
2676 maybe_request_map(osdc);
2678 while (!list_empty(&slow_osds)) {
2679 struct ceph_osd *osd = list_first_entry(&slow_osds,
2682 list_del_init(&osd->o_keepalive_item);
2683 ceph_con_keepalive(&osd->o_con);
2686 up_write(&osdc->lock);
2687 schedule_delayed_work(&osdc->timeout_work,
2688 osdc->client->options->osd_keepalive_timeout);
2691 static void handle_osds_timeout(struct work_struct *work)
2693 struct ceph_osd_client *osdc =
2694 container_of(work, struct ceph_osd_client,
2695 osds_timeout_work.work);
2696 unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
2697 struct ceph_osd *osd, *nosd;
2699 dout("%s osdc %p\n", __func__, osdc);
2700 down_write(&osdc->lock);
2701 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
2702 if (time_before(jiffies, osd->lru_ttl))
2705 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
2706 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
2710 up_write(&osdc->lock);
2711 schedule_delayed_work(&osdc->osds_timeout_work,
2712 round_jiffies_relative(delay));
2715 static int ceph_oloc_decode(void **p, void *end,
2716 struct ceph_object_locator *oloc)
2718 u8 struct_v, struct_cv;
2723 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2724 struct_v = ceph_decode_8(p);
2725 struct_cv = ceph_decode_8(p);
	if (struct_v < 3) {
		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
2731 if (struct_cv > 6) {
2732 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
2733 struct_v, struct_cv);
2736 len = ceph_decode_32(p);
2737 ceph_decode_need(p, end, len, e_inval);
2738 struct_end = *p + len;
2740 oloc->pool = ceph_decode_64(p);
2741 *p += 4; /* skip preferred */
	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_object_locator::key is set\n");
		goto e_inval;
	}
2749 if (struct_v >= 5) {
2750 bool changed = false;
2752 len = ceph_decode_32(p);
2754 ceph_decode_need(p, end, len, e_inval);
2755 if (!oloc->pool_ns ||
2756 ceph_compare_string(oloc->pool_ns, *p, len))
2764 /* redirect changes namespace */
2765 pr_warn("ceph_object_locator::nspace is changed\n");
	if (struct_v >= 6) {
		s64 hash = ceph_decode_64(p);

		if (hash != -1) {
			pr_warn("ceph_object_locator::hash is set\n");
			goto e_inval;
		}
	}
2788 static int ceph_redirect_decode(void **p, void *end,
2789 struct ceph_request_redirect *redir)
2791 u8 struct_v, struct_cv;
2796 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2797 struct_v = ceph_decode_8(p);
2798 struct_cv = ceph_decode_8(p);
2799 if (struct_cv > 1) {
2800 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
2801 struct_v, struct_cv);
2804 len = ceph_decode_32(p);
2805 ceph_decode_need(p, end, len, e_inval);
2806 struct_end = *p + len;
2808 ret = ceph_oloc_decode(p, end, &redir->oloc);
	len = ceph_decode_32(p);
	if (len > 0) {
		pr_warn("ceph_request_redirect::object_name is set\n");
		goto e_inval;
	}
2818 len = ceph_decode_32(p);
2819 *p += len; /* skip osd_instructions */
2831 struct MOSDOpReply {
2832 struct ceph_pg pgid;
2837 u32 outdata_len[CEPH_OSD_MAX_OPS];
2838 s32 rval[CEPH_OSD_MAX_OPS];
2840 struct ceph_eversion replay_version;
2842 struct ceph_request_redirect redirect;
2845 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
2847 void *p = msg->front.iov_base;
2848 void *const end = p + msg->front.iov_len;
2849 u16 version = le16_to_cpu(msg->hdr.version);
2850 struct ceph_eversion bad_replay_version;
2856 ceph_decode_32_safe(&p, end, len, e_inval);
2857 ceph_decode_need(&p, end, len, e_inval);
2858 p += len; /* skip oid */
2860 ret = ceph_decode_pgid(&p, end, &m->pgid);
2864 ceph_decode_64_safe(&p, end, m->flags, e_inval);
2865 ceph_decode_32_safe(&p, end, m->result, e_inval);
2866 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
2867 memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
2868 p += sizeof(bad_replay_version);
2869 ceph_decode_32_safe(&p, end, m->epoch, e_inval);
2871 ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
2872 if (m->num_ops > ARRAY_SIZE(m->outdata_len))
2875 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
2877 for (i = 0; i < m->num_ops; i++) {
2878 struct ceph_osd_op *op = p;
2880 m->outdata_len[i] = le32_to_cpu(op->payload_len);
2884 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
2885 for (i = 0; i < m->num_ops; i++)
2886 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
2889 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
2890 memcpy(&m->replay_version, p, sizeof(m->replay_version));
2891 p += sizeof(m->replay_version);
2892 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
2894 m->replay_version = bad_replay_version; /* struct */
2895 m->user_version = le64_to_cpu(m->replay_version.version);
2900 ceph_decode_8_safe(&p, end, decode_redir, e_inval);
2908 ret = ceph_redirect_decode(&p, end, &m->redirect);
2912 ceph_oloc_init(&m->redirect.oloc);
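/*
 * The decoding above relies on the ceph_decode_*_safe() helpers,
 * which bounds-check the buffer before reading and jump to the given
 * label on short input.  A minimal sketch of the pattern used
 * throughout this file:
 *
 *	u32 val;
 *
 *	ceph_decode_32_safe(&p, end, val, e_inval);  // checks p + 4 <= end
 *	...
 * e_inval:
 *	return -EINVAL;
 */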
/*
 * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
 * registered.
 */
2925 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2927 struct ceph_osd_client *osdc = osd->o_osdc;
2928 struct ceph_osd_request *req;
2929 struct MOSDOpReply m;
2930 u64 tid = le64_to_cpu(msg->hdr.tid);
2935 dout("%s msg %p tid %llu\n", __func__, msg, tid);
2937 down_read(&osdc->lock);
2938 if (!osd_registered(osd)) {
2939 dout("%s osd%d unknown\n", __func__, osd->o_osd);
2940 goto out_unlock_osdc;
2942 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
2944 mutex_lock(&osd->lock);
2945 req = lookup_request(&osd->o_requests, tid);
2947 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
2948 goto out_unlock_session;
2951 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
2952 ret = decode_MOSDOpReply(msg, &m);
2953 m.redirect.oloc.pool_ns = NULL;
2955 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
2960 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
2961 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
2962 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
2963 le64_to_cpu(m.replay_version.version), m.user_version);
2965 if (m.retry_attempt >= 0) {
2966 if (m.retry_attempt != req->r_attempts - 1) {
2967 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
2968 req, req->r_tid, m.retry_attempt,
2969 req->r_attempts - 1);
2970 goto out_unlock_session;
2973 WARN_ON(1); /* MOSDOpReply v4 is assumed */
2976 if (!ceph_oloc_empty(&m.redirect.oloc)) {
2977 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
2978 m.redirect.oloc.pool);
2979 unlink_request(osd, req);
2980 mutex_unlock(&osd->lock);
2983 * Not ceph_oloc_copy() - changing pool_ns is not
2986 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
2987 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
2989 __submit_request(req, false);
2990 goto out_unlock_osdc;
2993 if (m.num_ops != req->r_num_ops) {
2994 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
2995 req->r_num_ops, req->r_tid);
2998 for (i = 0; i < req->r_num_ops; i++) {
2999 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3000 req->r_tid, i, m.rval[i], m.outdata_len[i]);
3001 req->r_ops[i].rval = m.rval[i];
3002 req->r_ops[i].outdata_len = m.outdata_len[i];
3003 data_len += m.outdata_len[i];
3005 if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3006 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3007 le32_to_cpu(msg->hdr.data_len), req->r_tid);
3010 dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3011 req, req->r_tid, m.result, data_len);
3014 * Since we only ever request ONDISK, we should only ever get
3015 * one (type of) reply back.
3017 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3018 req->r_result = m.result ?: data_len;
3019 finish_request(req);
3020 mutex_unlock(&osd->lock);
3021 up_read(&osdc->lock);
3023 __complete_request(req);
3024 complete_all(&req->r_completion);
3025 ceph_osdc_put_request(req);
3029 complete_request(req, -EIO);
3031 mutex_unlock(&osd->lock);
3033 up_read(&osdc->lock);
3036 static void set_pool_was_full(struct ceph_osd_client *osdc)
3040 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3041 struct ceph_pg_pool_info *pi =
3042 rb_entry(n, struct ceph_pg_pool_info, node);
3044 pi->was_full = __pool_full(pi);
3048 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3050 struct ceph_pg_pool_info *pi;
3052 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3056 return pi->was_full && !__pool_full(pi);
3059 static enum calc_target_result
3060 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3062 struct ceph_osd_client *osdc = lreq->osdc;
3063 enum calc_target_result ct_res;
3065 ct_res = calc_target(osdc, &lreq->t, true);
3066 if (ct_res == CALC_TARGET_NEED_RESEND) {
3067 struct ceph_osd *osd;
3069 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3070 if (osd != lreq->osd) {
3071 unlink_linger(lreq->osd, lreq);
3072 link_linger(osd, lreq);
3080 * Requeue requests whose mapping to an OSD has changed.
3082 static void scan_requests(struct ceph_osd *osd,
3085 bool check_pool_cleared_full,
3086 struct rb_root *need_resend,
3087 struct list_head *need_resend_linger)
3089 struct ceph_osd_client *osdc = osd->o_osdc;
3091 bool force_resend_writes;
3093 for (n = rb_first(&osd->o_linger_requests); n; ) {
3094 struct ceph_osd_linger_request *lreq =
3095 rb_entry(n, struct ceph_osd_linger_request, node);
3096 enum calc_target_result ct_res;
3098 n = rb_next(n); /* recalc_linger_target() */
3100 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3102 ct_res = recalc_linger_target(lreq);
3104 case CALC_TARGET_NO_ACTION:
3105 force_resend_writes = cleared_full ||
3106 (check_pool_cleared_full &&
3107 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3108 if (!force_resend && !force_resend_writes)
3112 case CALC_TARGET_NEED_RESEND:
3113 cancel_linger_map_check(lreq);
3115 * scan_requests() for the previous epoch(s)
3116 * may have already added it to the list, since
3117 * it's not unlinked here.
3119 if (list_empty(&lreq->scan_item))
3120 list_add_tail(&lreq->scan_item, need_resend_linger);
3122 case CALC_TARGET_POOL_DNE:
3123 check_linger_pool_dne(lreq);
3128 for (n = rb_first(&osd->o_requests); n; ) {
3129 struct ceph_osd_request *req =
3130 rb_entry(n, struct ceph_osd_request, r_node);
3131 enum calc_target_result ct_res;
3133 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3135 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3136 ct_res = calc_target(osdc, &req->r_t, false);
3138 case CALC_TARGET_NO_ACTION:
3139 force_resend_writes = cleared_full ||
3140 (check_pool_cleared_full &&
3141 pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3142 if (!force_resend &&
3143 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3144 !force_resend_writes))
3148 case CALC_TARGET_NEED_RESEND:
3149 cancel_map_check(req);
3150 unlink_request(osd, req);
3151 insert_request(need_resend, req);
3153 case CALC_TARGET_POOL_DNE:
3154 check_pool_dne(req);
3160 static int handle_one_map(struct ceph_osd_client *osdc,
3161 void *p, void *end, bool incremental,
3162 struct rb_root *need_resend,
3163 struct list_head *need_resend_linger)
3165 struct ceph_osdmap *newmap;
3167 bool skipped_map = false;
3170 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3171 set_pool_was_full(osdc);
3174 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3176 newmap = ceph_osdmap_decode(&p, end);
3178 return PTR_ERR(newmap);
3180 if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
3186 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3187 struct ceph_pg_pool_info *pi =
3188 rb_entry(n, struct ceph_pg_pool_info, node);
3189 struct ceph_pg_pool_info *old_pi;
3191 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3193 pi->was_full = old_pi->was_full;
3195 WARN_ON(pi->was_full);
3198 if (osdc->osdmap->epoch &&
3199 osdc->osdmap->epoch + 1 < newmap->epoch) {
3200 WARN_ON(incremental);
3204 ceph_osdmap_destroy(osdc->osdmap);
3205 osdc->osdmap = newmap;
3208 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3209 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3210 need_resend, need_resend_linger);
3212 for (n = rb_first(&osdc->osds); n; ) {
3213 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3215 n = rb_next(n); /* close_osd() */
3217 scan_requests(osd, skipped_map, was_full, true, need_resend,
3218 need_resend_linger);
3219 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3220 memcmp(&osd->o_con.peer_addr,
3221 ceph_osd_addr(osdc->osdmap, osd->o_osd),
3222 sizeof(struct ceph_entity_addr)))
3229 static void kick_requests(struct ceph_osd_client *osdc,
3230 struct rb_root *need_resend,
3231 struct list_head *need_resend_linger)
3233 struct ceph_osd_linger_request *lreq, *nlreq;
3236 for (n = rb_first(need_resend); n; ) {
3237 struct ceph_osd_request *req =
3238 rb_entry(n, struct ceph_osd_request, r_node);
3239 struct ceph_osd *osd;
3242 erase_request(need_resend, req); /* before link_request() */
3244 WARN_ON(req->r_osd);
3245 calc_target(osdc, &req->r_t, false);
3246 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3247 link_request(osd, req);
3248 if (!req->r_linger) {
3249 if (!osd_homeless(osd) && !req->r_t.paused)
3252 cancel_linger_request(req);
3256 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3257 if (!osd_homeless(lreq->osd))
3260 list_del_init(&lreq->scan_item);
3265 * Process updated osd map.
3267 * The message contains any number of incremental and full maps, normally
3268 * indicating some sort of topology change in the cluster. Kick requests
3269 * off to different OSDs as needed.
3271 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3273 void *p = msg->front.iov_base;
3274 void *const end = p + msg->front.iov_len;
3275 u32 nr_maps, maplen;
3277 struct ceph_fsid fsid;
3278 struct rb_root need_resend = RB_ROOT;
3279 LIST_HEAD(need_resend_linger);
3280 bool handled_incremental = false;
3281 bool was_pauserd, was_pausewr;
3282 bool pauserd, pausewr;
3285 dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3286 down_write(&osdc->lock);
3289 ceph_decode_need(&p, end, sizeof(fsid), bad);
3290 ceph_decode_copy(&p, &fsid, sizeof(fsid));
3291 if (ceph_check_fsid(osdc->client, &fsid) < 0)
3294 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3295 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3296 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3297 have_pool_full(osdc);
3299 /* incremental maps */
3300 ceph_decode_32_safe(&p, end, nr_maps, bad);
3301 dout(" %d inc maps\n", nr_maps);
3302 while (nr_maps > 0) {
3303 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3304 epoch = ceph_decode_32(&p);
3305 maplen = ceph_decode_32(&p);
3306 ceph_decode_need(&p, end, maplen, bad);
3307 if (osdc->osdmap->epoch &&
3308 osdc->osdmap->epoch + 1 == epoch) {
3309 dout("applying incremental map %u len %d\n",
3311 err = handle_one_map(osdc, p, p + maplen, true,
3312 &need_resend, &need_resend_linger);
3315 handled_incremental = true;
3317 dout("ignoring incremental map %u len %d\n",
3323 if (handled_incremental)
3327 ceph_decode_32_safe(&p, end, nr_maps, bad);
3328 dout(" %d full maps\n", nr_maps);
3330 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3331 epoch = ceph_decode_32(&p);
3332 maplen = ceph_decode_32(&p);
3333 ceph_decode_need(&p, end, maplen, bad);
3335 dout("skipping non-latest full map %u len %d\n",
3337 } else if (osdc->osdmap->epoch >= epoch) {
3338 dout("skipping full map %u len %d, "
3339 "older than our %u\n", epoch, maplen,
3340 osdc->osdmap->epoch);
3342 dout("taking full map %u len %d\n", epoch, maplen);
3343 err = handle_one_map(osdc, p, p + maplen, false,
3344 &need_resend, &need_resend_linger);
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
3359 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3360 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3361 have_pool_full(osdc);
3362 if (was_pauserd || was_pausewr || pauserd || pausewr ||
3363 osdc->osdmap->epoch < osdc->epoch_barrier)
3364 maybe_request_map(osdc);
3366 kick_requests(osdc, &need_resend, &need_resend_linger);
3368 ceph_osdc_abort_on_full(osdc);
3369 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3370 osdc->osdmap->epoch);
3371 up_write(&osdc->lock);
3372 wake_up_all(&osdc->client->auth_wq);
3376 pr_err("osdc handle_map corrupt msg\n");
3378 up_write(&osdc->lock);
3382 * Resubmit requests pending on the given osd.
3384 static void kick_osd_requests(struct ceph_osd *osd)
3388 for (n = rb_first(&osd->o_requests); n; ) {
3389 struct ceph_osd_request *req =
3390 rb_entry(n, struct ceph_osd_request, r_node);
3392 n = rb_next(n); /* cancel_linger_request() */
3394 if (!req->r_linger) {
3395 if (!req->r_t.paused)
3398 cancel_linger_request(req);
3401 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3402 struct ceph_osd_linger_request *lreq =
3403 rb_entry(n, struct ceph_osd_linger_request, node);
3410 * If the osd connection drops, we need to resubmit all requests.
3412 static void osd_fault(struct ceph_connection *con)
3414 struct ceph_osd *osd = con->private;
3415 struct ceph_osd_client *osdc = osd->o_osdc;
3417 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3419 down_write(&osdc->lock);
3420 if (!osd_registered(osd)) {
3421 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3425 if (!reopen_osd(osd))
3426 kick_osd_requests(osd);
3427 maybe_request_map(osdc);
3430 up_write(&osdc->lock);
3434 * Process osd watch notifications
3436 static void handle_watch_notify(struct ceph_osd_client *osdc,
3437 struct ceph_msg *msg)
3439 void *p = msg->front.iov_base;
3440 void *const end = p + msg->front.iov_len;
3441 struct ceph_osd_linger_request *lreq;
3442 struct linger_work *lwork;
3443 u8 proto_ver, opcode;
3444 u64 cookie, notify_id;
3445 u64 notifier_id = 0;
3446 s32 return_code = 0;
3447 void *payload = NULL;
3448 u32 payload_len = 0;
3450 ceph_decode_8_safe(&p, end, proto_ver, bad);
3451 ceph_decode_8_safe(&p, end, opcode, bad);
3452 ceph_decode_64_safe(&p, end, cookie, bad);
3453 p += 8; /* skip ver */
3454 ceph_decode_64_safe(&p, end, notify_id, bad);
3456 if (proto_ver >= 1) {
3457 ceph_decode_32_safe(&p, end, payload_len, bad);
3458 ceph_decode_need(&p, end, payload_len, bad);
3463 if (le16_to_cpu(msg->hdr.version) >= 2)
3464 ceph_decode_32_safe(&p, end, return_code, bad);
3466 if (le16_to_cpu(msg->hdr.version) >= 3)
3467 ceph_decode_64_safe(&p, end, notifier_id, bad);
3469 down_read(&osdc->lock);
3470 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
3472 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
3474 goto out_unlock_osdc;
3477 mutex_lock(&lreq->lock);
3478 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
3479 opcode, cookie, lreq, lreq->is_watch);
3480 if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
3481 if (!lreq->last_error) {
3482 lreq->last_error = -ENOTCONN;
3483 queue_watch_error(lreq);
3485 } else if (!lreq->is_watch) {
3486 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
3487 if (lreq->notify_id && lreq->notify_id != notify_id) {
3488 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
3489 lreq->notify_id, notify_id);
3490 } else if (!completion_done(&lreq->notify_finish_wait)) {
3491 struct ceph_msg_data *data =
3492 list_first_entry_or_null(&msg->data,
3493 struct ceph_msg_data,
3497 if (lreq->preply_pages) {
3498 WARN_ON(data->type !=
3499 CEPH_MSG_DATA_PAGES);
3500 *lreq->preply_pages = data->pages;
				*lreq->preply_len = data->length;
			} else {
				ceph_release_page_vector(data->pages,
					calc_pages_for(0, data->length));
			}
3507 lreq->notify_finish_error = return_code;
3508 complete_all(&lreq->notify_finish_wait);
3511 /* CEPH_WATCH_EVENT_NOTIFY */
3512 lwork = lwork_alloc(lreq, do_watch_notify);
3514 pr_err("failed to allocate notify-lwork\n");
3515 goto out_unlock_lreq;
3518 lwork->notify.notify_id = notify_id;
3519 lwork->notify.notifier_id = notifier_id;
3520 lwork->notify.payload = payload;
3521 lwork->notify.payload_len = payload_len;
3522 lwork->notify.msg = ceph_msg_get(msg);
3527 mutex_unlock(&lreq->lock);
3529 up_read(&osdc->lock);
3533 pr_err("osdc handle_watch_notify corrupt msg\n");
3537 * Register request, send initial attempt.
3539 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
3540 struct ceph_osd_request *req,
3543 down_read(&osdc->lock);
3544 submit_request(req, false);
3545 up_read(&osdc->lock);
3549 EXPORT_SYMBOL(ceph_osdc_start_request);
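/*
 * A minimal submit-and-wait sketch for callers (error handling
 * elided; "..." marks request setup such as oid/oloc/ops):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
 *	...
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 *
 * This is the same pattern ceph_osdc_notify_ack() and
 * ceph_osdc_list_watchers() below follow internally.
 */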
3552 * Unregister a registered request. The request is not completed:
3553 * ->r_result isn't set and __complete_request() isn't called.
3555 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
3557 struct ceph_osd_client *osdc = req->r_osdc;
3559 down_write(&osdc->lock);
3561 cancel_request(req);
3562 up_write(&osdc->lock);
3564 EXPORT_SYMBOL(ceph_osdc_cancel_request);
3567 * @timeout: in jiffies, 0 means "wait forever"
3569 static int wait_request_timeout(struct ceph_osd_request *req,
3570 unsigned long timeout)
3574 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3575 left = wait_for_completion_killable_timeout(&req->r_completion,
3576 ceph_timeout_jiffies(timeout));
3578 left = left ?: -ETIMEDOUT;
3579 ceph_osdc_cancel_request(req);
3581 left = req->r_result; /* completed */
3588 * wait for a request to complete
3590 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
3591 struct ceph_osd_request *req)
3593 return wait_request_timeout(req, 0);
3595 EXPORT_SYMBOL(ceph_osdc_wait_request);
3598 * sync - wait for all in-flight requests to flush. avoid starvation.
3600 void ceph_osdc_sync(struct ceph_osd_client *osdc)
3602 struct rb_node *n, *p;
3603 u64 last_tid = atomic64_read(&osdc->last_tid);
3606 down_read(&osdc->lock);
3607 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3608 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3610 mutex_lock(&osd->lock);
3611 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
3612 struct ceph_osd_request *req =
3613 rb_entry(p, struct ceph_osd_request, r_node);
3615 if (req->r_tid > last_tid)
3618 if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
3621 ceph_osdc_get_request(req);
3622 mutex_unlock(&osd->lock);
3623 up_read(&osdc->lock);
3624 dout("%s waiting on req %p tid %llu last_tid %llu\n",
3625 __func__, req, req->r_tid, last_tid);
3626 wait_for_completion(&req->r_completion);
3627 ceph_osdc_put_request(req);
3631 mutex_unlock(&osd->lock);
3634 up_read(&osdc->lock);
3635 dout("%s done last_tid %llu\n", __func__, last_tid);
3637 EXPORT_SYMBOL(ceph_osdc_sync);
3639 static struct ceph_osd_request *
3640 alloc_linger_request(struct ceph_osd_linger_request *lreq)
3642 struct ceph_osd_request *req;
3644 req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
3648 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3649 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3651 if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
3652 ceph_osdc_put_request(req);
3660 * Returns a handle, caller owns a ref.
3662 struct ceph_osd_linger_request *
3663 ceph_osdc_watch(struct ceph_osd_client *osdc,
3664 struct ceph_object_id *oid,
3665 struct ceph_object_locator *oloc,
3666 rados_watchcb2_t wcb,
3667 rados_watcherrcb_t errcb,
3670 struct ceph_osd_linger_request *lreq;
3673 lreq = linger_alloc(osdc);
3675 return ERR_PTR(-ENOMEM);
3677 lreq->is_watch = true;
3679 lreq->errcb = errcb;
3681 lreq->watch_valid_thru = jiffies;
3683 ceph_oid_copy(&lreq->t.base_oid, oid);
3684 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3685 lreq->t.flags = CEPH_OSD_FLAG_WRITE;
3686 ktime_get_real_ts(&lreq->mtime);
3688 lreq->reg_req = alloc_linger_request(lreq);
3689 if (!lreq->reg_req) {
3694 lreq->ping_req = alloc_linger_request(lreq);
3695 if (!lreq->ping_req) {
3700 down_write(&osdc->lock);
3701 linger_register(lreq); /* before osd_req_op_* */
3702 osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
3703 CEPH_OSD_WATCH_OP_WATCH);
3704 osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
3705 CEPH_OSD_WATCH_OP_PING);
3706 linger_submit(lreq);
3707 up_write(&osdc->lock);
3709 ret = linger_reg_commit_wait(lreq);
3711 linger_cancel(lreq);
3719 return ERR_PTR(ret);
3721 EXPORT_SYMBOL(ceph_osdc_watch);
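/*
 * Sketch of a watch consumer; callback signatures follow
 * rados_watchcb2_t/rados_watcherrcb_t, and the names here are
 * illustrative only:
 *
 *	static void my_wcb(void *arg, u64 notify_id, u64 cookie,
 *			   u64 notifier_id, void *data, size_t data_len)
 *	{ ... ack with ceph_osdc_notify_ack() ... }
 *
 *	static void my_errcb(void *arg, u64 cookie, int err)
 *	{ ... watch is broken, rewatch or bail ... }
 *
 *	handle = ceph_osdc_watch(osdc, oid, oloc, my_wcb, my_errcb, arg);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ceph_osdc_unwatch(osdc, handle);
 */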
3726 * Times out after mount_timeout to preserve rbd unmap behaviour
3727 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
3728 * with mount_timeout").
3730 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
3731 struct ceph_osd_linger_request *lreq)
3733 struct ceph_options *opts = osdc->client->options;
3734 struct ceph_osd_request *req;
3737 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3741 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3742 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3743 req->r_flags = CEPH_OSD_FLAG_WRITE;
3744 ktime_get_real_ts(&req->r_mtime);
3745 osd_req_op_watch_init(req, 0, lreq->linger_id,
3746 CEPH_OSD_WATCH_OP_UNWATCH);
3748 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3752 ceph_osdc_start_request(osdc, req, false);
3753 linger_cancel(lreq);
3755 ret = wait_request_timeout(req, opts->mount_timeout);
3758 ceph_osdc_put_request(req);
3761 EXPORT_SYMBOL(ceph_osdc_unwatch);
3763 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
3764 u64 notify_id, u64 cookie, void *payload,
3767 struct ceph_osd_req_op *op;
3768 struct ceph_pagelist *pl;
3771 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
3773 pl = kmalloc(sizeof(*pl), GFP_NOIO);
3777 ceph_pagelist_init(pl);
3778 ret = ceph_pagelist_encode_64(pl, notify_id);
3779 ret |= ceph_pagelist_encode_64(pl, cookie);
3781 ret |= ceph_pagelist_encode_32(pl, payload_len);
3782 ret |= ceph_pagelist_append(pl, payload, payload_len);
3784 ret |= ceph_pagelist_encode_32(pl, 0);
3787 ceph_pagelist_release(pl);
3791 ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
3792 op->indata_len = pl->length;
3796 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
3797 struct ceph_object_id *oid,
3798 struct ceph_object_locator *oloc,
3804 struct ceph_osd_request *req;
3807 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3811 ceph_oid_copy(&req->r_base_oid, oid);
3812 ceph_oloc_copy(&req->r_base_oloc, oloc);
3813 req->r_flags = CEPH_OSD_FLAG_READ;
3815 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3819 ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
3824 ceph_osdc_start_request(osdc, req, false);
3825 ret = ceph_osdc_wait_request(osdc, req);
3828 ceph_osdc_put_request(req);
3831 EXPORT_SYMBOL(ceph_osdc_notify_ack);
3833 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
3834 u64 cookie, u32 prot_ver, u32 timeout,
3835 void *payload, size_t payload_len)
3837 struct ceph_osd_req_op *op;
3838 struct ceph_pagelist *pl;
3841 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
3842 op->notify.cookie = cookie;
3844 pl = kmalloc(sizeof(*pl), GFP_NOIO);
3848 ceph_pagelist_init(pl);
3849 ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
3850 ret |= ceph_pagelist_encode_32(pl, timeout);
3851 ret |= ceph_pagelist_encode_32(pl, payload_len);
3852 ret |= ceph_pagelist_append(pl, payload, payload_len);
3854 ceph_pagelist_release(pl);
3858 ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
3859 op->indata_len = pl->length;
3864 * @timeout: in seconds
3866 * @preply_{pages,len} are initialized both on success and error.
3867 * The caller is responsible for:
3869 * ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
3871 int ceph_osdc_notify(struct ceph_osd_client *osdc,
3872 struct ceph_object_id *oid,
3873 struct ceph_object_locator *oloc,
3877 struct page ***preply_pages,
3880 struct ceph_osd_linger_request *lreq;
3881 struct page **pages;
3886 *preply_pages = NULL;
3890 lreq = linger_alloc(osdc);
3894 lreq->preply_pages = preply_pages;
3895 lreq->preply_len = preply_len;
3897 ceph_oid_copy(&lreq->t.base_oid, oid);
3898 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3899 lreq->t.flags = CEPH_OSD_FLAG_READ;
3901 lreq->reg_req = alloc_linger_request(lreq);
3902 if (!lreq->reg_req) {
3908 pages = ceph_alloc_page_vector(1, GFP_NOIO);
3909 if (IS_ERR(pages)) {
3910 ret = PTR_ERR(pages);
3914 down_write(&osdc->lock);
3915 linger_register(lreq); /* before osd_req_op_* */
3916 ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
3917 timeout, payload, payload_len);
3919 linger_unregister(lreq);
3920 up_write(&osdc->lock);
3921 ceph_release_page_vector(pages, 1);
3924 ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
3926 pages, PAGE_SIZE, 0, false, true);
3927 linger_submit(lreq);
3928 up_write(&osdc->lock);
3930 ret = linger_reg_commit_wait(lreq);
3932 ret = linger_notify_finish_wait(lreq);
3934 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
3936 linger_cancel(lreq);
3941 EXPORT_SYMBOL(ceph_osdc_notify);
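/*
 * Caller-side sketch for ceph_osdc_notify(), matching the reply
 * ownership rule documented above (timeout of 10 seconds chosen
 * arbitrarily for illustration):
 *
 *	struct page **reply_pages = NULL;
 *	size_t reply_len = 0;
 *
 *	ret = ceph_osdc_notify(osdc, oid, oloc, payload, payload_len,
 *			       10, &reply_pages, &reply_len);
 *	if (!ret)
 *		... parse page_address(reply_pages[0]) ...
 *	if (reply_pages)
 *		ceph_release_page_vector(reply_pages,
 *					 calc_pages_for(0, reply_len));
 */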
3944 * Return the number of milliseconds since the watch was last
3945 * confirmed, or an error. If there is an error, the watch is no
3946 * longer valid, and should be destroyed with ceph_osdc_unwatch().
3948 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
3949 struct ceph_osd_linger_request *lreq)
3951 unsigned long stamp, age;
3954 down_read(&osdc->lock);
3955 mutex_lock(&lreq->lock);
3956 stamp = lreq->watch_valid_thru;
3957 if (!list_empty(&lreq->pending_lworks)) {
3958 struct linger_work *lwork =
3959 list_first_entry(&lreq->pending_lworks,
3963 if (time_before(lwork->queued_stamp, stamp))
3964 stamp = lwork->queued_stamp;
3966 age = jiffies - stamp;
3967 dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
3968 lreq, lreq->linger_id, age, lreq->last_error);
3969 /* we are truncating to msecs, so return a safe upper bound */
3970 ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
3972 mutex_unlock(&lreq->lock);
3973 up_read(&osdc->lock);
3977 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
3983 ret = ceph_start_decoding(p, end, 2, "watch_item_t",
3984 &struct_v, &struct_len);
3988 ceph_decode_copy(p, &item->name, sizeof(item->name));
3989 item->cookie = ceph_decode_64(p);
3990 *p += 4; /* skip timeout_seconds */
3991 if (struct_v >= 2) {
3992 ceph_decode_copy(p, &item->addr, sizeof(item->addr));
3993 ceph_decode_addr(&item->addr);
3996 dout("%s %s%llu cookie %llu addr %s\n", __func__,
3997 ENTITY_NAME(item->name), item->cookie,
3998 ceph_pr_addr(&item->addr.in_addr));
4002 static int decode_watchers(void **p, void *end,
4003 struct ceph_watch_item **watchers,
4011 ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4012 &struct_v, &struct_len);
4016 *num_watchers = ceph_decode_32(p);
4017 *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4021 for (i = 0; i < *num_watchers; i++) {
4022 ret = decode_watcher(p, end, *watchers + i);
/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers)
 */
4037 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4038 struct ceph_object_id *oid,
4039 struct ceph_object_locator *oloc,
4040 struct ceph_watch_item **watchers,
4043 struct ceph_osd_request *req;
4044 struct page **pages;
4047 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4051 ceph_oid_copy(&req->r_base_oid, oid);
4052 ceph_oloc_copy(&req->r_base_oloc, oloc);
4053 req->r_flags = CEPH_OSD_FLAG_READ;
4055 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4059 pages = ceph_alloc_page_vector(1, GFP_NOIO);
4060 if (IS_ERR(pages)) {
4061 ret = PTR_ERR(pages);
4065 osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
4066 ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
4068 pages, PAGE_SIZE, 0, false, true);
4070 ceph_osdc_start_request(osdc, req, false);
4071 ret = ceph_osdc_wait_request(osdc, req);
4073 void *p = page_address(pages[0]);
4074 void *const end = p + req->r_ops[0].outdata_len;
4076 ret = decode_watchers(&p, end, watchers, num_watchers);
4080 ceph_osdc_put_request(req);
4083 EXPORT_SYMBOL(ceph_osdc_list_watchers);
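/*
 * Usage sketch, honoring the kfree(watchers) contract noted above:
 *
 *	struct ceph_watch_item *watchers;
 *	u32 num_watchers;
 *	int i, ret;
 *
 *	ret = ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
 *				      &num_watchers);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < num_watchers; i++)
 *		... inspect watchers[i].name / cookie / addr ...
 *	kfree(watchers);
 */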
4086 * Call all pending notify callbacks - for use after a watch is
4087 * unregistered, to make sure no more callbacks for it will be invoked
4089 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4091 dout("%s osdc %p\n", __func__, osdc);
4092 flush_workqueue(osdc->notify_wq);
4094 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4096 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4098 down_read(&osdc->lock);
4099 maybe_request_map(osdc);
4100 up_read(&osdc->lock);
4102 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4105 * Execute an OSD class method on an object.
4107 * @flags: CEPH_OSD_FLAG_*
4108 * @resp_len: in/out param for reply length
4110 int ceph_osdc_call(struct ceph_osd_client *osdc,
4111 struct ceph_object_id *oid,
4112 struct ceph_object_locator *oloc,
4113 const char *class, const char *method,
4115 struct page *req_page, size_t req_len,
4116 struct page *resp_page, size_t *resp_len)
4118 struct ceph_osd_request *req;
4121 if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
4124 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4128 ceph_oid_copy(&req->r_base_oid, oid);
4129 ceph_oloc_copy(&req->r_base_oloc, oloc);
4130 req->r_flags = flags;
4132 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4136 osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4138 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4141 osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4142 *resp_len, 0, false, false);
4144 ceph_osdc_start_request(osdc, req, false);
4145 ret = ceph_osdc_wait_request(osdc, req);
4147 ret = req->r_ops[0].rval;
4149 *resp_len = req->r_ops[0].outdata_len;
4153 ceph_osdc_put_request(req);
4156 EXPORT_SYMBOL(ceph_osdc_call);
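/*
 * Illustrative ceph_osdc_call() invocation; the class/method names
 * are examples only (rbd makes a similar call to read an image id):
 *
 *	struct page *reply_page = alloc_page(GFP_NOIO);
 *	size_t reply_len = PAGE_SIZE;
 *
 *	ret = ceph_osdc_call(osdc, oid, oloc, "rbd", "get_id",
 *			     CEPH_OSD_FLAG_READ, NULL, 0,
 *			     reply_page, &reply_len);
 *	if (ret >= 0)
 *		... decode page_address(reply_page), reply_len bytes ...
 *	__free_page(reply_page);
 */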
4161 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4166 osdc->client = client;
4167 init_rwsem(&osdc->lock);
4168 osdc->osds = RB_ROOT;
4169 INIT_LIST_HEAD(&osdc->osd_lru);
4170 spin_lock_init(&osdc->osd_lru_lock);
4171 osd_init(&osdc->homeless_osd);
4172 osdc->homeless_osd.o_osdc = osdc;
4173 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4174 osdc->last_linger_id = CEPH_LINGER_ID_START;
4175 osdc->linger_requests = RB_ROOT;
4176 osdc->map_checks = RB_ROOT;
4177 osdc->linger_map_checks = RB_ROOT;
4178 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
4179 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
4182 osdc->osdmap = ceph_osdmap_alloc();
4186 osdc->req_mempool = mempool_create_slab_pool(10,
4187 ceph_osd_request_cache);
4188 if (!osdc->req_mempool)
4191 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
4192 PAGE_SIZE, 10, true, "osd_op");
4195 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4196 PAGE_SIZE, 10, true, "osd_op_reply");
4201 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
4202 if (!osdc->notify_wq)
4203 goto out_msgpool_reply;
4205 schedule_delayed_work(&osdc->timeout_work,
4206 osdc->client->options->osd_keepalive_timeout);
4207 schedule_delayed_work(&osdc->osds_timeout_work,
4208 round_jiffies_relative(osdc->client->options->osd_idle_ttl));
4213 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4215 ceph_msgpool_destroy(&osdc->msgpool_op);
4217 mempool_destroy(osdc->req_mempool);
4219 ceph_osdmap_destroy(osdc->osdmap);
4224 void ceph_osdc_stop(struct ceph_osd_client *osdc)
4226 flush_workqueue(osdc->notify_wq);
4227 destroy_workqueue(osdc->notify_wq);
4228 cancel_delayed_work_sync(&osdc->timeout_work);
4229 cancel_delayed_work_sync(&osdc->osds_timeout_work);
4231 down_write(&osdc->lock);
4232 while (!RB_EMPTY_ROOT(&osdc->osds)) {
4233 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
4234 struct ceph_osd, o_node);
4237 up_write(&osdc->lock);
4238 WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
4239 osd_cleanup(&osdc->homeless_osd);
4241 WARN_ON(!list_empty(&osdc->osd_lru));
4242 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
4243 WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
4244 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
4245 WARN_ON(atomic_read(&osdc->num_requests));
4246 WARN_ON(atomic_read(&osdc->num_homeless));
4248 ceph_osdmap_destroy(osdc->osdmap);
4249 mempool_destroy(osdc->req_mempool);
4250 ceph_msgpool_destroy(&osdc->msgpool_op);
4251 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4255 * Read some contiguous pages. If we cross a stripe boundary, shorten
4256 * *plen. Return number of bytes read, or error.
4258 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4259 struct ceph_vino vino, struct ceph_file_layout *layout,
4261 u32 truncate_seq, u64 truncate_size,
4262 struct page **pages, int num_pages, int page_align)
4264 struct ceph_osd_request *req;
4267 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
4268 vino.snap, off, *plen);
4269 req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
4270 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
4271 NULL, truncate_seq, truncate_size,
4274 return PTR_ERR(req);
4276 /* it may be a short read due to an object boundary */
4277 osd_req_op_extent_osd_data_pages(req, 0,
4278 pages, *plen, page_align, false, false);
4280 dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
4281 off, *plen, *plen, page_align);
4283 rc = ceph_osdc_start_request(osdc, req, false);
4285 rc = ceph_osdc_wait_request(osdc, req);
4287 ceph_osdc_put_request(req);
4288 dout("readpages result %d\n", rc);
4291 EXPORT_SYMBOL(ceph_osdc_readpages);
4294 * do a synchronous write on N pages
4296 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
4297 struct ceph_file_layout *layout,
4298 struct ceph_snap_context *snapc,
4300 u32 truncate_seq, u64 truncate_size,
4301 struct timespec *mtime,
4302 struct page **pages, int num_pages)
4304 struct ceph_osd_request *req;
4306 int page_align = off & ~PAGE_MASK;
4308 req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
4309 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
4310 snapc, truncate_seq, truncate_size,
4313 return PTR_ERR(req);
4315 /* it may be a short write due to an object boundary */
4316 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
4318 dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
4320 req->r_mtime = *mtime;
4321 rc = ceph_osdc_start_request(osdc, req, true);
4323 rc = ceph_osdc_wait_request(osdc, req);
4325 ceph_osdc_put_request(req);
4328 dout("writepages result %d\n", rc);
4331 EXPORT_SYMBOL(ceph_osdc_writepages);
4333 int ceph_osdc_setup(void)
4335 size_t size = sizeof(struct ceph_osd_request) +
4336 CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
4338 BUG_ON(ceph_osd_request_cache);
4339 ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
4342 return ceph_osd_request_cache ? 0 : -ENOMEM;
4344 EXPORT_SYMBOL(ceph_osdc_setup);
4346 void ceph_osdc_cleanup(void)
4348 BUG_ON(!ceph_osd_request_cache);
4349 kmem_cache_destroy(ceph_osd_request_cache);
4350 ceph_osd_request_cache = NULL;
4352 EXPORT_SYMBOL(ceph_osdc_cleanup);
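/*
 * ceph_osdc_setup()/ceph_osdc_cleanup() are module-scope; a minimal
 * sketch of the expected call sites (libceph invokes them from its
 * own module init/exit):
 *
 *	static int __init init_lib(void)
 *	{
 *		return ceph_osdc_setup();
 *	}
 *
 *	static void __exit exit_lib(void)
 *	{
 *		ceph_osdc_cleanup();
 *	}
 */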
4355 * handle incoming message
4357 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4359 struct ceph_osd *osd = con->private;
4360 struct ceph_osd_client *osdc = osd->o_osdc;
4361 int type = le16_to_cpu(msg->hdr.type);
4364 case CEPH_MSG_OSD_MAP:
4365 ceph_osdc_handle_map(osdc, msg);
4367 case CEPH_MSG_OSD_OPREPLY:
4368 handle_reply(osd, msg);
4370 case CEPH_MSG_WATCH_NOTIFY:
4371 handle_watch_notify(osdc, msg);
4375 pr_err("received unknown message type %d %s\n", type,
4376 ceph_msg_type_name(type));
4383 * Lookup and return message for incoming reply. Don't try to do
4384 * anything about a larger than preallocated data portion of the
4385 * message at the moment - for now, just skip the message.
4387 static struct ceph_msg *get_reply(struct ceph_connection *con,
4388 struct ceph_msg_header *hdr,
4391 struct ceph_osd *osd = con->private;
4392 struct ceph_osd_client *osdc = osd->o_osdc;
4393 struct ceph_msg *m = NULL;
4394 struct ceph_osd_request *req;
4395 int front_len = le32_to_cpu(hdr->front_len);
4396 int data_len = le32_to_cpu(hdr->data_len);
4397 u64 tid = le64_to_cpu(hdr->tid);
4399 down_read(&osdc->lock);
4400 if (!osd_registered(osd)) {
4401 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
4403 goto out_unlock_osdc;
4405 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
4407 mutex_lock(&osd->lock);
4408 req = lookup_request(&osd->o_requests, tid);
4410 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
4413 goto out_unlock_session;
4416 ceph_msg_revoke_incoming(req->r_reply);
4418 if (front_len > req->r_reply->front_alloc_len) {
4419 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
4420 __func__, osd->o_osd, req->r_tid, front_len,
4421 req->r_reply->front_alloc_len);
4422 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
4425 goto out_unlock_session;
4426 ceph_msg_put(req->r_reply);
4430 if (data_len > req->r_reply->data_length) {
4431 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
4432 __func__, osd->o_osd, req->r_tid, data_len,
4433 req->r_reply->data_length);
4436 goto out_unlock_session;
4439 m = ceph_msg_get(req->r_reply);
4440 dout("get_reply tid %lld %p\n", tid, m);
4443 mutex_unlock(&osd->lock);
4445 up_read(&osdc->lock);
4450 * TODO: switch to a msg-owned pagelist
4452 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
4455 int type = le16_to_cpu(hdr->type);
4456 u32 front_len = le32_to_cpu(hdr->front_len);
4457 u32 data_len = le32_to_cpu(hdr->data_len);
4459 m = ceph_msg_new(type, front_len, GFP_NOIO, false);
4464 struct page **pages;
4465 struct ceph_osd_data osd_data;
4467 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
4469 if (IS_ERR(pages)) {
4474 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
4476 ceph_osdc_msg_data_add(m, &osd_data);
4482 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
4483 struct ceph_msg_header *hdr,
4486 struct ceph_osd *osd = con->private;
4487 int type = le16_to_cpu(hdr->type);
4491 case CEPH_MSG_OSD_MAP:
4492 case CEPH_MSG_WATCH_NOTIFY:
4493 return alloc_msg_with_page_vector(hdr);
4494 case CEPH_MSG_OSD_OPREPLY:
4495 return get_reply(con, hdr, skip);
4497 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
4505 * Wrappers to refcount containing ceph_osd struct
4507 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
4509 struct ceph_osd *osd = con->private;
4515 static void put_osd_con(struct ceph_connection *con)
4517 struct ceph_osd *osd = con->private;
4525 * Note: returned pointer is the address of a structure that's
4526 * managed separately. Caller must *not* attempt to free it.
4528 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4529 int *proto, int force_new)
4531 struct ceph_osd *o = con->private;
4532 struct ceph_osd_client *osdc = o->o_osdc;
4533 struct ceph_auth_client *ac = osdc->client->monc.auth;
4534 struct ceph_auth_handshake *auth = &o->o_auth;
4536 if (force_new && auth->authorizer) {
4537 ceph_auth_destroy_authorizer(auth->authorizer);
4538 auth->authorizer = NULL;
4540 if (!auth->authorizer) {
4541 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4544 return ERR_PTR(ret);
4546 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4549 return ERR_PTR(ret);
4551 *proto = ac->protocol;
4557 static int verify_authorizer_reply(struct ceph_connection *con)
4559 struct ceph_osd *o = con->private;
4560 struct ceph_osd_client *osdc = o->o_osdc;
4561 struct ceph_auth_client *ac = osdc->client->monc.auth;
4563 return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
4566 static int invalidate_authorizer(struct ceph_connection *con)
4568 struct ceph_osd *o = con->private;
4569 struct ceph_osd_client *osdc = o->o_osdc;
4570 struct ceph_auth_client *ac = osdc->client->monc.auth;
4572 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
4573 return ceph_monc_validate_auth(&osdc->client->monc);
4576 static int osd_sign_message(struct ceph_msg *msg)
4578 struct ceph_osd *o = msg->con->private;
4579 struct ceph_auth_handshake *auth = &o->o_auth;
4581 return ceph_auth_sign_message(auth, msg);
4584 static int osd_check_message_signature(struct ceph_msg *msg)
4586 struct ceph_osd *o = msg->con->private;
4587 struct ceph_auth_handshake *auth = &o->o_auth;
4589 return ceph_auth_check_message_signature(auth, msg);
4592 static const struct ceph_connection_operations osd_con_ops = {
4595 .dispatch = dispatch,
4596 .get_authorizer = get_authorizer,
4597 .verify_authorizer_reply = verify_authorizer_reply,
4598 .invalidate_authorizer = invalidate_authorizer,
4599 .alloc_msg = alloc_msg,
4600 .sign_message = osd_sign_message,
4601 .check_message_signature = osd_check_message_signature,