#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>

#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
/*
 * Interact with Ceph monitor cluster. Handle requests for new map
 * versions, and periodically resend as needed. Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" is responsible for managing critical
 * cluster configuration and state information. An odd number (e.g., 3, 5)
 * of cmon daemons use a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
 * list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order to
 * receive timely MDSMap updates. We periodically send a keepalive byte on the
 * TCP socket to ensure we detect a failure. If the connection does break, we
 * randomly hunt for a new monitor. Once the connection is reestablished, we
 * resend any outstanding requests.
 */
static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);

/*
 * Decode a monmap blob (e.g., during mount).
 */
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
	struct ceph_monmap *m = NULL;
	struct ceph_fsid fsid;

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);

	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
	for (i = 0; i < num_mon; i++)
		ceph_decode_addr(&m->mon_inst[i].addr);

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
	     m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode mon%d is %s\n", i,
		     ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
	for (i = 0; i < m->num_mon; i++)
		if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
			return 1;
	return 0;
/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
	monc->pending_auth = 1;
	monc->m_auth->front.iov_len = len;
	monc->m_auth->hdr.front_len = cpu_to_le32(len);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_get(monc->m_auth); /* keep our ref */
	ceph_con_send(&monc->con, monc->m_auth);
/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
	dout("__close_session closing mon%d\n", monc->cur_mon);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_revoke_incoming(monc->m_auth_reply);
	ceph_msg_revoke(monc->m_subscribe);
	ceph_msg_revoke_incoming(monc->m_subscribe_ack);
	ceph_con_close(&monc->con);

	monc->pending_auth = 0;
	ceph_auth_reset(monc->auth);
/*
 * Pick a new monitor at random and set cur_mon. If we are repicking
 * (i.e. cur_mon is already set), be sure to pick a different one.
 */
static void pick_new_mon(struct ceph_mon_client *monc)
	int old_mon = monc->cur_mon;

	BUG_ON(monc->monmap->num_mon < 1);
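	/*
	 * With a single monitor there is nothing to choose. Otherwise
	 * pick an index at random; when repicking, the range is shrunk
	 * by one and the draw is shifted past the old index so that a
	 * different monitor is always selected.
	 */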
	if (monc->monmap->num_mon == 1) {
		monc->cur_mon = 0;
	} else {
		int max = monc->monmap->num_mon;
		int o = -1;
		int n;

		if (monc->cur_mon >= 0) {
			if (monc->cur_mon < monc->monmap->num_mon)
				o = monc->cur_mon;
			if (o >= 0)
				max--;
		}

		n = prandom_u32() % max;
		if (o >= 0 && n >= o)
			n++;

		monc->cur_mon = n;
	}

	dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
	     monc->cur_mon, monc->monmap->num_mon);
/*
 * Open a session with a new monitor.
 */
static void __open_session(struct ceph_mon_client *monc)
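	/*
	 * Each time we have to hunt for a new monitor after having had
	 * a working session, stretch the retry interval by
	 * CEPH_MONC_HUNT_BACKOFF, capped at CEPH_MONC_HUNT_MAX_MULT,
	 * so a flapping cluster is not hammered with reconnects.
	 */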
	if (monc->had_a_connection) {
		monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF;
		if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT)
			monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT;
	}

	monc->sub_renew_after = jiffies; /* i.e., expired */
	monc->sub_renew_sent = 0;

	dout("%s opening mon%d\n", __func__, monc->cur_mon);
	ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
		      &monc->monmap->mon_inst[monc->cur_mon].addr);

	/*
	 * send an initial keepalive to ensure our timestamp is valid
	 * by the time we are in an OPENED state
	 */
	ceph_con_keepalive(&monc->con);

	/* initiate authentication handshake */
	ret = ceph_auth_build_hello(monc->auth,
				    monc->m_auth->front.iov_base,
				    monc->m_auth->front_alloc_len);
	__send_prepared_auth_request(monc, ret);
/*
 * Reschedule delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
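	/*
	 * While hunting, the next attempt is scheduled after the hunt
	 * interval scaled by hunt_mult; with an established session we
	 * simply tick at the keepalive/ping interval.
	 */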
	if (monc->hunting)
		delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;
	else
		delay = CEPH_MONC_PING_INTERVAL;

	dout("__schedule_delayed after %lu\n", delay);
	schedule_delayed_work(&monc->delayed_work,
			      round_jiffies_relative(delay));
const char *ceph_sub_str[] = {
	[CEPH_SUB_MDSMAP] = "mdsmap",
	[CEPH_SUB_MONMAP] = "monmap",
	[CEPH_SUB_OSDMAP] = "osdmap",
};
/*
 * Send subscribe request for one or more maps, according to
 * monc->subs.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
	struct ceph_msg *msg = monc->m_subscribe;
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;

	dout("%s sent %lu\n", __func__, monc->sub_renew_sent);

	BUG_ON(monc->cur_mon < 0);

	if (!monc->sub_renew_sent)
		monc->sub_renew_sent = jiffies | 1; /* never 0 */

	msg->hdr.version = cpu_to_le16(2);

	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		if (monc->subs[i].want)
			num++;
	}
	BUG_ON(num < 1); /* monmap sub is always there */
	ceph_encode_32(&p, num);
	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		const char *s = ceph_sub_str[i];

		if (!monc->subs[i].want)
			continue;

		dout("%s %s start %llu flags 0x%x\n", __func__, s,
		     le64_to_cpu(monc->subs[i].item.start),
		     monc->subs[i].item.flags);
		ceph_encode_string(&p, end, s, strlen(s));
		memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
		p += sizeof(monc->subs[i].item);
	}
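	/*
	 * Sanity-check the encoded length. Each subscription entry is
	 * 19 bytes: a 4-byte string length, a 6-character map name and
	 * (assuming the packed ceph_mon_subscribe_item layout) a 9-byte
	 * item; 35 is what remains of the 96-byte front after the
	 * 4-byte count and all three possible entries.
	 */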
	BUG_ON(p != (end - 35 - (ARRAY_SIZE(monc->subs) - num) * 19));
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_msg_revoke(msg);
	ceph_con_send(&monc->con, ceph_msg_get(msg));
static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
	unsigned int seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->sub_renew_sent) {
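		/*
		 * Schedule the renewal at roughly half of the granted
		 * duration, leaving slack to retry before the monitor
		 * drops our subscription.
		 */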
		monc->sub_renew_after = monc->sub_renew_sent +
			(seconds >> 1) * HZ - 1;
		dout("%s sent %lu duration %d renew after %lu\n", __func__,
		     monc->sub_renew_sent, seconds, monc->sub_renew_after);
		monc->sub_renew_sent = 0;
	} else {
		dout("%s sent %lu renew after %lu, ignoring\n", __func__,
		     monc->sub_renew_sent, monc->sub_renew_after);
	}
	mutex_unlock(&monc->mutex);
	return;

bad:
	pr_err("got corrupt subscribe-ack msg\n");
/*
 * Register interest in a map
 *
 * @sub: one of CEPH_SUB_*
 * @epoch: X for "every map since X", or 0 for "just the latest"
 */
static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub,
				 u32 epoch, bool continuous)
	__le64 start = cpu_to_le64(epoch);
	u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0;
	dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub],
	     epoch, continuous);

	if (monc->subs[sub].want &&
	    monc->subs[sub].item.start == start &&
	    monc->subs[sub].item.flags == flags)
		return false;

	monc->subs[sub].item.start = start;
	monc->subs[sub].item.flags = flags;
	monc->subs[sub].want = true;

	return true;
bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
			bool continuous)
	mutex_lock(&monc->mutex);
	need_request = __ceph_monc_want_map(monc, sub, epoch, continuous);
	mutex_unlock(&monc->mutex);

	return need_request;
EXPORT_SYMBOL(ceph_monc_want_map);
/*
 * Keep track of which maps we have
 *
 * @sub: one of CEPH_SUB_*
 */
static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub,
				u32 epoch)
	dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch);

	if (monc->subs[sub].want) {
		if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)
			monc->subs[sub].want = false;
		else
			monc->subs[sub].item.start = cpu_to_le64(epoch + 1);
	}

	monc->subs[sub].have = epoch;

void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
	mutex_lock(&monc->mutex);
	__ceph_monc_got_map(monc, sub, epoch);
	mutex_unlock(&monc->mutex);
EXPORT_SYMBOL(ceph_monc_got_map);
/*
 * Register interest in the next osdmap
 */
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
	dout("%s have %u\n", __func__, monc->subs[CEPH_SUB_OSDMAP].have);
	mutex_lock(&monc->mutex);
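	/* one-shot subscription starting just past the epoch we already have */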
	if (__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP,
				 monc->subs[CEPH_SUB_OSDMAP].have + 1, false))
		__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
EXPORT_SYMBOL(ceph_monc_request_next_osdmap);
/*
 * Wait for an osdmap with a given epoch.
 *
 * @epoch: epoch to wait for
 * @timeout: in jiffies, 0 means "wait forever"
 */
int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
			  unsigned long timeout)
	unsigned long started = jiffies;

	mutex_lock(&monc->mutex);
	while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) {
		mutex_unlock(&monc->mutex);

		if (timeout && time_after_eq(jiffies, started + timeout))
			return -ETIMEDOUT;

		ret = wait_event_interruptible_timeout(monc->client->auth_wq,
				monc->subs[CEPH_SUB_OSDMAP].have >= epoch,
				ceph_timeout_jiffies(timeout));
		if (ret < 0)
			return ret;

		mutex_lock(&monc->mutex);
	}

	mutex_unlock(&monc->mutex);
	return 0;
EXPORT_SYMBOL(ceph_monc_wait_osdmap);
/*
 * Open a session with a random monitor. Request monmap and osdmap,
 * which are waited upon in __ceph_open_session().
 */
int ceph_monc_open_session(struct ceph_mon_client *monc)
	mutex_lock(&monc->mutex);
	__ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
	__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
	__open_session(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
EXPORT_SYMBOL(ceph_monc_open_session);
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;

	mutex_lock(&monc->mutex);

	dout("handle_monmap\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	monmap = ceph_monmap_decode(p, end);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		goto out;
	}

	if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
		kfree(monmap);
		goto out;
	}

	client->monc.monmap = monmap;
	kfree(old);

	__ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
	client->have_fsid = true;

out:
	mutex_unlock(&monc->mutex);
	wake_up_all(&client->auth_wq);
/*
 * generic requests (currently statfs, mon_get_version)
 */
static struct ceph_mon_generic_request *__lookup_generic_req(
	struct ceph_mon_client *monc, u64 tid)
	struct ceph_mon_generic_request *req;
	struct rb_node *n = monc->generic_request_tree.rb_node;

		req = rb_entry(n, struct ceph_mon_generic_request, node);
		else if (tid > req->tid)

static void __insert_generic_request(struct ceph_mon_client *monc,
				     struct ceph_mon_generic_request *new)
	struct rb_node **p = &monc->generic_request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mon_generic_request *req = NULL;

		req = rb_entry(parent, struct ceph_mon_generic_request, node);
		if (new->tid < req->tid)
		else if (new->tid > req->tid)

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &monc->generic_request_tree);
static void release_generic_request(struct kref *kref)
	struct ceph_mon_generic_request *req =
		container_of(kref, struct ceph_mon_generic_request, kref);

	if (req->reply)
		ceph_msg_put(req->reply);
	if (req->request)
		ceph_msg_put(req->request);

static void put_generic_request(struct ceph_mon_generic_request *req)
	kref_put(&req->kref, release_generic_request);

static void get_generic_request(struct ceph_mon_generic_request *req)
	kref_get(&req->kref);
static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
					  struct ceph_msg_header *hdr,
					  int *skip)
	struct ceph_mon_client *monc = con->private;
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(hdr->tid);

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (!req) {
		dout("get_generic_reply %lld dne\n", tid);
	} else {
		dout("get_generic_reply %lld got %p\n", tid, req->reply);
		m = ceph_msg_get(req->reply);
		/*
		 * we don't need to track the connection reading into
		 * this reply because we only have one open connection
		 */
	}
	mutex_unlock(&monc->mutex);
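/*
 * Send a generic request and wait for the reply. The caller is expected
 * to hold monc->mutex; the lock is dropped while we wait for the reply
 * and re-taken to unregister the request.
 */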
static int __do_generic_request(struct ceph_mon_client *monc, u64 tid,
				struct ceph_mon_generic_request *req)
	/* register request */
	req->tid = tid != 0 ? tid : ++monc->last_tid;
	req->request->hdr.tid = cpu_to_le64(req->tid);
	__insert_generic_request(monc, req);
	monc->num_generic_requests++;
	ceph_con_send(&monc->con, ceph_msg_get(req->request));
	mutex_unlock(&monc->mutex);

	err = wait_for_completion_interruptible(&req->completion);

	mutex_lock(&monc->mutex);
	rb_erase(&req->node, &monc->generic_request_tree);
	monc->num_generic_requests--;
static int do_generic_request(struct ceph_mon_client *monc,
			      struct ceph_mon_generic_request *req)
	mutex_lock(&monc->mutex);
	err = __do_generic_request(monc, 0, req);
	mutex_unlock(&monc->mutex);

	return err;
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;
	dout("handle_statfs_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (req) {
		*(struct ceph_statfs *)req->buf = reply->st;
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete_all(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt statfs reply, tid %llu\n", tid);
/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs *h;

	req = kzalloc(sizeof(*req), GFP_NOFS);

	kref_init(&req->kref);
	init_completion(&req->completion);

	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
				    true);
	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
				  true);

	/* fill out request */
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
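	/*
	 * session_mon of -1: not tied to a particular monitor session
	 * (an assumption about the paxos service header semantics).
	 */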
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;

	err = do_generic_request(monc, req);

	put_generic_request(req);
	return err;
EXPORT_SYMBOL(ceph_monc_do_statfs);
static void handle_get_version_reply(struct ceph_mon_client *monc,
				     struct ceph_msg *msg)
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	void *p = msg->front.iov_base;
	void *end = p + msg->front_alloc_len;

	dout("%s %p tid %llu\n", __func__, msg, tid);

	ceph_decode_need(&p, end, 2*sizeof(u64), bad);
	handle = ceph_decode_64(&p);
	if (tid != 0 && tid != handle)
		goto bad;

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, handle);
	if (req) {
		*(u64 *)req->buf = ceph_decode_64(&p);
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete_all(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
/*
 * Send MMonGetVersion and wait for the reply.
 *
 * @what: one of "mdsmap", "osdmap" or "monmap"
 */
int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
			     u64 *newest)
	struct ceph_mon_generic_request *req;

	req = kzalloc(sizeof(*req), GFP_NOFS);

	kref_init(&req->kref);
	init_completion(&req->completion);

	req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
				    sizeof(u64) + sizeof(u32) + strlen(what),
				    GFP_NOFS, true);

	req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024,
				  GFP_NOFS, true);

	p = req->request->front.iov_base;
	end = p + req->request->front_alloc_len;
	/* fill out request */
	mutex_lock(&monc->mutex);
	tid = ++monc->last_tid;
	ceph_encode_64(&p, tid); /* handle */
	ceph_encode_string(&p, end, what, strlen(what));

	err = __do_generic_request(monc, tid, req);

	mutex_unlock(&monc->mutex);

	put_generic_request(req);
	return err;
EXPORT_SYMBOL(ceph_monc_do_get_version);
/*
 * Resend pending generic requests.
 */
static void __resend_generic_request(struct ceph_mon_client *monc)
	struct ceph_mon_generic_request *req;

	for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mon_generic_request, node);
		ceph_msg_revoke(req->request);
		ceph_msg_revoke_incoming(req->reply);
		ceph_con_send(&monc->con, ceph_msg_get(req->request));
	}
/*
 * Delayed work. If we haven't mounted yet, retry. Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM). And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		__close_session(monc);
		__open_session(monc); /* continue hunting */
	} else {
		int is_auth = ceph_auth_is_authenticated(monc->auth);

		if (ceph_con_keepalive_expired(&monc->con,
					       CEPH_MONC_PING_TIMEOUT)) {
			dout("monc keepalive timeout\n");
			__close_session(monc);
			monc->hunting = true;
			__open_session(monc);
		}

		if (!monc->hunting) {
			ceph_con_keepalive(&monc->con);
			__validate_auth(monc);
		}

		if (is_auth) {
			unsigned long now = jiffies;

			dout("%s renew subs? now %lu renew after %lu\n",
			     __func__, now, monc->sub_renew_after);
			if (time_after_eq(now, monc->sub_renew_after))
				__send_subscribe(monc);
		}
	}

	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
	struct ceph_options *opt = monc->client->options;
	struct ceph_entity_addr *mon_addr = opt->mon_addr;
	int num_mon = opt->num_mon;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (monc->monmap == NULL)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type =
			CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;
	return 0;
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);

	monc->auth = ceph_auth_init(cl->options->name,
				    cl->options->key);
	if (IS_ERR(monc->auth)) {
		err = PTR_ERR(monc->auth);
	monc->auth->want_keys =
		CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
		CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;

	monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
					     sizeof(struct ceph_mon_subscribe_ack),
					     GFP_NOFS, true);
	if (!monc->m_subscribe_ack)
		goto out_auth;

	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
					 true);
	if (!monc->m_subscribe)
		goto out_subscribe_ack;

	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
					  true);
	if (!monc->m_auth_reply)
		goto out_subscribe;

	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
	monc->pending_auth = 0;
	if (!monc->m_auth)
		goto out_auth_reply;

	ceph_con_init(&monc->con, monc, &mon_con_ops,
		      &monc->client->msgr);

	monc->hunting = true;
	monc->sub_renew_after = jiffies;
	monc->sub_renew_sent = 0;
	monc->had_a_connection = false;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	monc->generic_request_tree = RB_ROOT;
	monc->num_generic_requests = 0;

	return 0;

out_auth_reply:
	ceph_msg_put(monc->m_auth_reply);
out_subscribe:
	ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
	ceph_msg_put(monc->m_subscribe_ack);
out_auth:
	ceph_auth_destroy(monc->auth);
EXPORT_SYMBOL(ceph_monc_init);
void ceph_monc_stop(struct ceph_mon_client *monc)
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	mutex_unlock(&monc->mutex);

	/*
	 * flush msgr queue before we destroy ourselves to ensure that:
	 *  - any work that references our embedded con is finished.
	 *  - any osd_client or other work that may reference an authorizer
	 *    finishes before we shut down the auth subsystem.
	 */
	ceph_msgr_flush();

	ceph_auth_destroy(monc->auth);

	ceph_msg_put(monc->m_auth);
	ceph_msg_put(monc->m_auth_reply);
	ceph_msg_put(monc->m_subscribe);
	ceph_msg_put(monc->m_subscribe_ack);
EXPORT_SYMBOL(ceph_monc_stop);
static void finish_hunting(struct ceph_mon_client *monc)
	if (monc->hunting) {
		dout("%s found mon%d\n", __func__, monc->cur_mon);
		monc->hunting = false;
		monc->had_a_connection = true;
		monc->hunt_mult /= 2; /* reduce by 50% */
		if (monc->hunt_mult < 1)
			monc->hunt_mult = 1;
	}
static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
	mutex_lock(&monc->mutex);
	was_auth = ceph_auth_is_authenticated(monc->auth);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_alloc_len);
	if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
		goto out;
	}

	finish_hunting(monc);

	if (ret < 0) {
		monc->client->auth_err = ret;
	} else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");
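		/*
		 * The global_id assigned by the monitors during
		 * authentication becomes our entity name (client.<id>)
		 * for all further messenger traffic.
		 */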
		monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr.inst.name.num =
					cpu_to_le64(monc->auth->global_id);

		__send_subscribe(monc);
		__resend_generic_request(monc);

		pr_info("mon%d %s session established\n", monc->cur_mon,
			ceph_pr_addr(&monc->con.peer_addr.in_addr));
	}

out:
	mutex_unlock(&monc->mutex);
	if (monc->client->auth_err < 0)
		wake_up_all(&monc->client->auth_wq);
static int __validate_auth(struct ceph_mon_client *monc)
	if (monc->pending_auth)
		return 0;

	ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
			      monc->m_auth->front_alloc_len);
	if (ret <= 0)
		return ret; /* either an error, or no need to authenticate */
	__send_prepared_auth_request(monc, ret);
	return 0;

int ceph_monc_validate_auth(struct ceph_mon_client *monc)
	mutex_lock(&monc->mutex);
	ret = __validate_auth(monc);
	mutex_unlock(&monc->mutex);
	return ret;
EXPORT_SYMBOL(ceph_monc_validate_auth);
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;
	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;
	case CEPH_MSG_MON_GET_VERSION_REPLY:
		handle_get_version_reply(monc, msg);
		break;
	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;
	default:
		/* can the chained handler handle it? */
		if (monc->client->extra_mon_dispatch &&
		    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
			break;

		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
/*
 * Allocate memory for incoming message
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;
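	/*
	 * Bounded replies (subscribe ack, auth) reuse the messages
	 * preallocated in ceph_monc_init(); map payloads vary in size
	 * and are allocated per message, while statfs/get_version
	 * replies are matched to their request via get_generic_reply().
	 */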
	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msg_get(monc->m_subscribe_ack);
		break;
	case CEPH_MSG_STATFS_REPLY:
		return get_generic_reply(con, hdr, skip);
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msg_get(monc->m_auth_reply);
		break;
	case CEPH_MSG_MON_GET_VERSION_REPLY:
		if (le64_to_cpu(hdr->tid) != 0)
			return get_generic_reply(con, hdr, skip);

		/*
		 * Older OSDs don't set reply tid even if the original
		 * request had a non-zero tid. Work around this weirdness
		 * by falling through to the allocate case.
		 */
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!m)
			return NULL; /* ENOMEM--return skip == 0 */
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d\n", type);
		*skip = 1;
	} else if (front_len > m->front_alloc_len) {
		pr_warn("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
			front_len, m->front_alloc_len,
			(unsigned int)con->peer_name.type,
			le64_to_cpu(con->peer_name.num));
		ceph_msg_put(m);
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
	}
	return m;
/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
	struct ceph_mon_client *monc = con->private;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);

	if (!monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			ceph_pr_addr(&monc->con.peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
	mutex_unlock(&monc->mutex);
/*
 * We can ignore refcounting on the connection struct, as all references
 * will come from the messenger workqueue, which is drained prior to
 * mon_client destruction.
 */
static struct ceph_connection *con_get(struct ceph_connection *con)
	return con;

static void con_put(struct ceph_connection *con)

static const struct ceph_connection_operations mon_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};