#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>

#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>

/*
 * Interact with Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" is responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of cmon daemons use a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
 * list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order to
 * receive timely MDSMap updates.  We periodically send a keepalive byte on the
 * TCP socket to ensure we detect a failure.  If the connection does break, we
 * randomly hunt for a new monitor.  Once the connection is reestablished, we
 * resend any outstanding requests.
 */

static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);

/*
 * Decode a monmap blob (e.g., during mount).
 */
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
	struct ceph_monmap *m = NULL;
	int i, err = -EINVAL;
	struct ceph_fsid fsid;
	u32 epoch, num_mon;
	u16 version;
	u32 len;

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);

	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	m->fsid = fsid;
	m->epoch = epoch;
	m->num_mon = num_mon;
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
	for (i = 0; i < num_mon; i++)
		ceph_decode_addr(&m->mon_inst[i].addr);

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
	     m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode mon%d is %s\n", i,
		     ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
	kfree(m);
	return ERR_PTR(err);
}

/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
	int i;

	for (i = 0; i < m->num_mon; i++)
		if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
			return 1;
	return 0;
}

/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
	monc->pending_auth = 1;
	monc->m_auth->front.iov_len = len;
	monc->m_auth->hdr.front_len = cpu_to_le32(len);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_get(monc->m_auth); /* keep our ref */
	ceph_con_send(&monc->con, monc->m_auth);
}

/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
	dout("__close_session closing mon%d\n", monc->cur_mon);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_revoke_incoming(monc->m_auth_reply);
	ceph_msg_revoke(monc->m_subscribe);
	ceph_msg_revoke_incoming(monc->m_subscribe_ack);
	ceph_con_close(&monc->con);

	monc->pending_auth = 0;
	ceph_auth_reset(monc->auth);
}

/*
 * Pick a new monitor at random and set cur_mon.  If we are repicking
 * (i.e. cur_mon is already set), be sure to pick a different one.
 */
static void pick_new_mon(struct ceph_mon_client *monc)
{
	int old_mon = monc->cur_mon;

	BUG_ON(monc->monmap->num_mon < 1);

	if (monc->monmap->num_mon == 1) {
		monc->cur_mon = 0;
	} else {
		int max = monc->monmap->num_mon;
		int o = -1;
		int n;

		if (monc->cur_mon >= 0) {
			if (monc->cur_mon < monc->monmap->num_mon)
				o = monc->cur_mon;
			if (o >= 0)
				max--;
		}

		n = prandom_u32() % max;
		if (o >= 0 && n >= o)
			n++;

		monc->cur_mon = n;
	}

	dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
	     monc->cur_mon, monc->monmap->num_mon);
}

/*
 * Open a session with a new monitor.
 */
static void __open_session(struct ceph_mon_client *monc)
{
	int ret;

	pick_new_mon(monc);

	monc->sub_renew_after = jiffies; /* i.e., expired */
	monc->sub_renew_sent = 0;

	dout("%s opening mon%d\n", __func__, monc->cur_mon);
	ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
		      &monc->monmap->mon_inst[monc->cur_mon].addr);

	/*
	 * send an initial keepalive to ensure our timestamp is valid
	 * by the time we are in an OPENED state
	 */
	ceph_con_keepalive(&monc->con);

	/* initiate authentication handshake */
	ret = ceph_auth_build_hello(monc->auth,
				    monc->m_auth->front.iov_base,
				    monc->m_auth->front_alloc_len);
	__send_prepared_auth_request(monc, ret);
}
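
/*
 * Have we reached the point where our map subscriptions need to be
 * renewed?
 */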
static bool __sub_expired(struct ceph_mon_client *monc)
{
	return time_after_eq(jiffies, monc->sub_renew_after);
}

/*
 * Reschedule delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned long delay;

	if (monc->cur_mon < 0 || __sub_expired(monc)) {
		delay = 10 * HZ;
	} else {
		delay = CEPH_MONC_PING_INTERVAL;
	}
	dout("__schedule_delayed after %lu\n", delay);
	schedule_delayed_work(&monc->delayed_work,
			      round_jiffies_relative(delay));
}
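
/*
 * Subscription names as they are encoded in the subscribe message,
 * indexed by CEPH_SUB_*.
 */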
const char *ceph_sub_str[] = {
	[CEPH_SUB_MDSMAP] = "mdsmap",
	[CEPH_SUB_MONMAP] = "monmap",
	[CEPH_SUB_OSDMAP] = "osdmap",
};

/*
 * Send subscribe request for one or more maps, according to
 * monc->subs.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
{
	struct ceph_msg *msg = monc->m_subscribe;
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	int num = 0;
	int i;

	dout("%s sent %lu\n", __func__, monc->sub_renew_sent);

	BUG_ON(monc->cur_mon < 0);

	if (!monc->sub_renew_sent)
		monc->sub_renew_sent = jiffies | 1; /* never 0 */

	msg->hdr.version = cpu_to_le16(2);

	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		if (monc->subs[i].want)
			num++;
	}
	BUG_ON(num < 1); /* monmap sub is always there */
	ceph_encode_32(&p, num);
	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		const char *s = ceph_sub_str[i];

		if (!monc->subs[i].want)
			continue;

		dout("%s %s start %llu flags 0x%x\n", __func__, s,
		     le64_to_cpu(monc->subs[i].item.start),
		     monc->subs[i].item.flags);
		ceph_encode_string(&p, end, s, strlen(s));
		memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
		p += sizeof(monc->subs[i].item);
	}

	BUG_ON(p != (end - 35 - (ARRAY_SIZE(monc->subs) - num) * 19));
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_msg_revoke(msg);
	ceph_con_send(&monc->con, ceph_msg_get(msg));
}
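
/*
 * Handle a subscribe ack: note when the subscriptions should next be
 * renewed (half of the granted duration, measured from the time the
 * subscribe request was sent).
 */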
static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned int seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->sub_renew_sent) {
		monc->sub_renew_after = monc->sub_renew_sent +
					    (seconds >> 1) * HZ - 1;
		dout("%s sent %lu duration %d renew after %lu\n", __func__,
		     monc->sub_renew_sent, seconds, monc->sub_renew_after);
		monc->sub_renew_sent = 0;
	} else {
		dout("%s sent %lu renew after %lu, ignoring\n", __func__,
		     monc->sub_renew_sent, monc->sub_renew_after);
	}
	mutex_unlock(&monc->mutex);
	return;

bad:
	pr_err("got corrupt subscribe-ack msg\n");
	ceph_msg_dump(msg);
}

/*
 * Register interest in a map
 *
 * @sub: one of CEPH_SUB_*
 * @epoch: X for "every map since X", or 0 for "just the latest"
 */
static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub,
				 u32 epoch, bool continuous)
{
	__le64 start = cpu_to_le64(epoch);
	u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0;

	dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub],
	     epoch, continuous);

	if (monc->subs[sub].want &&
	    monc->subs[sub].item.start == start &&
	    monc->subs[sub].item.flags == flags)
		return false;

	monc->subs[sub].item.start = start;
	monc->subs[sub].item.flags = flags;
	monc->subs[sub].want = true;

	return true;
}

bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
			bool continuous)
{
	bool need_request;

	mutex_lock(&monc->mutex);
	need_request = __ceph_monc_want_map(monc, sub, epoch, continuous);
	mutex_unlock(&monc->mutex);

	return need_request;
}
EXPORT_SYMBOL(ceph_monc_want_map);
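
/*
 * Callers pair this with ceph_monc_got_map() below: want_map() records
 * the desired start epoch and returns true if a new subscribe message
 * needs to be sent (ceph_monc_request_next_osdmap() is the in-file
 * example), while got_map() records what we actually received.
 */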

/*
 * Keep track of which maps we have
 *
 * @sub: one of CEPH_SUB_*
 */
static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub,
				u32 epoch)
{
	dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch);

	if (monc->subs[sub].want) {
		if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)
			monc->subs[sub].want = false;
		else
			monc->subs[sub].item.start = cpu_to_le64(epoch + 1);
	}

	monc->subs[sub].have = epoch;
}

void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
{
	mutex_lock(&monc->mutex);
	__ceph_monc_got_map(monc, sub, epoch);
	mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_got_map);

/*
 * Register interest in the next osdmap
 */
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
{
	dout("%s have %u\n", __func__, monc->subs[CEPH_SUB_OSDMAP].have);
	mutex_lock(&monc->mutex);
	if (__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP,
				 monc->subs[CEPH_SUB_OSDMAP].have + 1, false))
		__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_request_next_osdmap);

/*
 * Wait for an osdmap with a given epoch.
 *
 * @epoch: epoch to wait for
 * @timeout: in jiffies, 0 means "wait forever"
 */
int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
			  unsigned long timeout)
{
	unsigned long started = jiffies;
	long ret;

	mutex_lock(&monc->mutex);
	while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) {
		mutex_unlock(&monc->mutex);

		if (timeout && time_after_eq(jiffies, started + timeout))
			return -ETIMEDOUT;

		ret = wait_event_interruptible_timeout(monc->client->auth_wq,
				monc->subs[CEPH_SUB_OSDMAP].have >= epoch,
				ceph_timeout_jiffies(timeout));
		if (ret < 0)
			return ret;

		mutex_lock(&monc->mutex);
	}

	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_wait_osdmap);

/*
 * Open a session with a random monitor.  Request monmap and osdmap,
 * which are waited upon in __ceph_open_session().
 */
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
	mutex_lock(&monc->mutex);
	__ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
	__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
	__open_session(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_open_session);

static void ceph_monc_handle_map(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
	void *p, *end;

	mutex_lock(&monc->mutex);

	dout("handle_monmap\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	monmap = ceph_monmap_decode(p, end);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		goto out;
	}

	if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
		kfree(monmap);
		goto out;
	}

	client->monc.monmap = monmap;
	kfree(old);

	__ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
	client->have_fsid = true;

out:
	mutex_unlock(&monc->mutex);
	wake_up_all(&client->auth_wq);
}

/*
 * generic requests (currently statfs, mon_get_version)
 */
static struct ceph_mon_generic_request *__lookup_generic_req(
	struct ceph_mon_client *monc, u64 tid)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *n = monc->generic_request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mon_generic_request, node);
		if (tid < req->tid)
			n = n->rb_left;
		else if (tid > req->tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static void __insert_generic_request(struct ceph_mon_client *monc,
				     struct ceph_mon_generic_request *new)
{
	struct rb_node **p = &monc->generic_request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mon_generic_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mon_generic_request, node);
		if (new->tid < req->tid)
			p = &(*p)->rb_left;
		else if (new->tid > req->tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &monc->generic_request_tree);
}

static void release_generic_request(struct kref *kref)
{
	struct ceph_mon_generic_request *req =
		container_of(kref, struct ceph_mon_generic_request, kref);

	if (req->reply)
		ceph_msg_put(req->reply);
	if (req->request)
		ceph_msg_put(req->request);

	kfree(req);
}

static void put_generic_request(struct ceph_mon_generic_request *req)
{
	kref_put(&req->kref, release_generic_request);
}

static void get_generic_request(struct ceph_mon_generic_request *req)
{
	kref_get(&req->kref);
}
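
/*
 * Find the generic request matching an incoming reply and return a ref
 * to its preallocated reply message; tell the messenger to skip the
 * payload if the tid is unknown.
 */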
static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
					  struct ceph_msg_header *hdr,
					  int *skip)
{
	struct ceph_mon_client *monc = con->private;
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(hdr->tid);
	struct ceph_msg *m;

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (!req) {
		dout("get_generic_reply %lld dne\n", tid);
		*skip = 1;
		m = NULL;
	} else {
		dout("get_generic_reply %lld got %p\n", tid, req->reply);
		*skip = 0;
		m = ceph_msg_get(req->reply);
		/*
		 * we don't need to track the connection reading into
		 * this reply because we only have one open connection
		 * at a time, ever.
		 */
	}
	mutex_unlock(&monc->mutex);
	return m;
}
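
/*
 * Send a generic request and wait for the reply.  __do_generic_request()
 * is called with monc->mutex held and drops it only while waiting for
 * the completion; do_generic_request() is the wrapper that takes the
 * mutex itself.
 */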
static int __do_generic_request(struct ceph_mon_client *monc, u64 tid,
				struct ceph_mon_generic_request *req)
{
	int err;

	/* register request */
	req->tid = tid != 0 ? tid : ++monc->last_tid;
	req->request->hdr.tid = cpu_to_le64(req->tid);
	__insert_generic_request(monc, req);
	monc->num_generic_requests++;
	ceph_con_send(&monc->con, ceph_msg_get(req->request));
	mutex_unlock(&monc->mutex);

	err = wait_for_completion_interruptible(&req->completion);

	mutex_lock(&monc->mutex);
	rb_erase(&req->node, &monc->generic_request_tree);
	monc->num_generic_requests--;

	if (!err)
		err = req->result;
	return err;
}

static int do_generic_request(struct ceph_mon_client *monc,
			      struct ceph_mon_generic_request *req)
{
	int err;

	mutex_lock(&monc->mutex);
	err = __do_generic_request(monc, 0, req);
	mutex_unlock(&monc->mutex);

	return err;
}
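
/*
 * Handle a statfs reply: copy the statistics into the waiting request's
 * buffer and complete it.
 */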
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;
	dout("handle_statfs_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (req) {
		*(struct ceph_statfs *)req->buf = reply->st;
		req->result = 0;
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete_all(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt statfs reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs *h;
	int err;

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	kref_init(&req->kref);
	req->buf = buf;
	init_completion(&req->completion);

	err = -ENOMEM;
	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
				    true);
	if (!req->request)
		goto out;
	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
				  true);
	if (!req->reply)
		goto out;

	/* fill out request */
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;

	err = do_generic_request(monc, req);

out:
	put_generic_request(req);
	return err;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);
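
/*
 * Handle a mon_get_version reply: the handle echoes our tid and the
 * value that follows is the newest map version the monitors know of.
 */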
static void handle_get_version_reply(struct ceph_mon_client *monc,
				     struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	void *p = msg->front.iov_base;
	void *end = p + msg->front_alloc_len;
	u64 handle;

	dout("%s %p tid %llu\n", __func__, msg, tid);

	ceph_decode_need(&p, end, 2*sizeof(u64), bad);
	handle = ceph_decode_64(&p);
	if (tid != 0 && tid != handle)
		goto bad;

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, handle);
	if (req) {
		*(u64 *)req->buf = ceph_decode_64(&p);
		req->result = 0;
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete_all(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

/*
 * Send MMonGetVersion and wait for the reply.
 *
 * @what: one of "mdsmap", "osdmap" or "monmap"
 */
int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
			     u64 *newest)
{
	struct ceph_mon_generic_request *req;
	void *p, *end;
	u64 tid;
	int err;

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	kref_init(&req->kref);
	req->buf = newest;
	init_completion(&req->completion);

	req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
				    sizeof(u64) + sizeof(u32) + strlen(what),
				    GFP_NOFS, true);
	if (!req->request) {
		err = -ENOMEM;
		goto out;
	}

	req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024,
				  GFP_NOFS, true);
	if (!req->reply) {
		err = -ENOMEM;
		goto out;
	}

	p = req->request->front.iov_base;
	end = p + req->request->front_alloc_len;

	/* fill out request */
	mutex_lock(&monc->mutex);
	tid = ++monc->last_tid;
	ceph_encode_64(&p, tid); /* handle */
	ceph_encode_string(&p, end, what, strlen(what));

	err = __do_generic_request(monc, tid, req);

	mutex_unlock(&monc->mutex);
out:
	put_generic_request(req);
	return err;
}
EXPORT_SYMBOL(ceph_monc_do_get_version);
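
/*
 * Example (sketch, not from this file): ask the monitors for the newest
 * osdmap epoch without subscribing to osdmap updates:
 *
 *	u64 newest = 0;
 *	int err = ceph_monc_do_get_version(&client->monc, "osdmap", &newest);
 */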

/*
 * Resend pending generic requests.
 */
static void __resend_generic_request(struct ceph_mon_client *monc)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *p;

	for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mon_generic_request, node);
		ceph_msg_revoke(req->request);
		ceph_msg_revoke_incoming(req->reply);
		ceph_con_send(&monc->con, ceph_msg_get(req->request));
	}
}

/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		__close_session(monc);
		__open_session(monc);  /* continue hunting */
	} else {
		int is_auth = ceph_auth_is_authenticated(monc->auth);

		if (ceph_con_keepalive_expired(&monc->con,
					       CEPH_MONC_PING_TIMEOUT)) {
			dout("monc keepalive timeout\n");
			is_auth = 0;
			__close_session(monc);
			monc->hunting = true;
			__open_session(monc);
		}

		if (!monc->hunting) {
			ceph_con_keepalive(&monc->con);
			__validate_auth(monc);
		}

		if (is_auth) {
			unsigned long now = jiffies;

			dout("%s renew subs? now %lu renew after %lu\n",
			     __func__, now, monc->sub_renew_after);
			if (time_after_eq(now, monc->sub_renew_after))
				__send_subscribe(monc);
		}
	}
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
}

/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
	struct ceph_options *opt = monc->client->options;
	struct ceph_entity_addr *mon_addr = opt->mon_addr;
	int num_mon = opt->num_mon;
	int i;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (!monc->monmap)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type =
			CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;
	return 0;
}
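
/*
 * Initialize the mon_client: build the initial monmap from the mount
 * options, set up the auth handler, preallocate the messages we exchange
 * with the monitors and prepare (but do not open) the connection.
 */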
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
	int err = 0;

	dout("init\n");
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	monc->monmap = NULL;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);
	if (err)
		goto out;

	/* authentication */
	monc->auth = ceph_auth_init(cl->options->name,
				    cl->options->key);
	if (IS_ERR(monc->auth)) {
		err = PTR_ERR(monc->auth);
		goto out_monmap;
	}
	monc->auth->want_keys =
		CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
		CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;

	/* msgs */
	err = -ENOMEM;
	monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
				     sizeof(struct ceph_mon_subscribe_ack),
				     GFP_NOFS, true);
	if (!monc->m_subscribe_ack)
		goto out_auth;

	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
					 true);
	if (!monc->m_subscribe)
		goto out_subscribe_ack;

	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
					  true);
	if (!monc->m_auth_reply)
		goto out_subscribe;

	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
	monc->pending_auth = 0;
	if (!monc->m_auth)
		goto out_auth_reply;

	ceph_con_init(&monc->con, monc, &mon_con_ops,
		      &monc->client->msgr);

	monc->cur_mon = -1;
	monc->hunting = true;
	monc->sub_renew_after = jiffies;
	monc->sub_renew_sent = 0;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	monc->generic_request_tree = RB_ROOT;
	monc->num_generic_requests = 0;
	monc->last_tid = 0;

	return 0;

out_auth_reply:
	ceph_msg_put(monc->m_auth_reply);
out_subscribe:
	ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
	ceph_msg_put(monc->m_subscribe_ack);
out_auth:
	ceph_auth_destroy(monc->auth);
out_monmap:
	kfree(monc->monmap);
out:
	return err;
}
EXPORT_SYMBOL(ceph_monc_init);

void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	monc->cur_mon = -1;
	mutex_unlock(&monc->mutex);

	/*
	 * flush msgr queue before we destroy ourselves to ensure that:
	 *  - any work that references our embedded con is finished.
	 *  - any osd_client or other work that may reference an authorizer
	 *    finishes before we shut down the auth subsystem.
	 */
	ceph_msgr_flush();

	ceph_auth_destroy(monc->auth);

	ceph_msg_put(monc->m_auth);
	ceph_msg_put(monc->m_auth_reply);
	ceph_msg_put(monc->m_subscribe);
	ceph_msg_put(monc->m_subscribe_ack);

	kfree(monc->monmap);
}
EXPORT_SYMBOL(ceph_monc_stop);
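
/*
 * We heard back from the monitor we picked: stop hunting.
 */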
static void finish_hunting(struct ceph_mon_client *monc)
{
	if (monc->hunting) {
		dout("%s found mon%d\n", __func__, monc->cur_mon);
		monc->hunting = false;
	}
}
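
/*
 * Handle an auth reply: either continue the handshake with a follow-up
 * auth request, or, once authenticated, set our global_id-based entity
 * name, send our subscriptions and resend any pending generic requests.
 */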
static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;
	int was_auth = 0;

	mutex_lock(&monc->mutex);
	was_auth = ceph_auth_is_authenticated(monc->auth);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_alloc_len);
	if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
		goto out;
	}

	finish_hunting(monc);

	if (ret < 0) {
		monc->client->auth_err = ret;
	} else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");

		monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr.inst.name.num =
					cpu_to_le64(monc->auth->global_id);

		__send_subscribe(monc);
		__resend_generic_request(monc);

		pr_info("mon%d %s session established\n", monc->cur_mon,
			ceph_pr_addr(&monc->con.peer_addr.in_addr));
	}

out:
	mutex_unlock(&monc->mutex);
	if (monc->client->auth_err < 0)
		wake_up_all(&monc->client->auth_wq);
}
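
/*
 * (Re)build and send an auth request if our tickets need renewal.
 * Called with monc->mutex held; a no-op while an auth request is
 * already in flight.
 */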
static int __validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	if (monc->pending_auth)
		return 0;

	ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
			      monc->m_auth->front_alloc_len);
	if (ret <= 0)
		return ret; /* either an error, or no need to authenticate */
	__send_prepared_auth_request(monc, ret);
	return 0;
}

int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	mutex_lock(&monc->mutex);
	ret = __validate_auth(monc);
	mutex_unlock(&monc->mutex);

	return ret;
}
EXPORT_SYMBOL(ceph_monc_validate_auth);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;

	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;

	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;

	case CEPH_MSG_MON_GET_VERSION_REPLY:
		handle_get_version_reply(monc, msg);
		break;

	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;

	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;

	default:
		/* can the chained handler handle it? */
		if (monc->client->extra_mon_dispatch &&
		    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
			break;

		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}

/*
 * Allocate memory for incoming message
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;

	*skip = 0;

	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msg_get(monc->m_subscribe_ack);
		break;
	case CEPH_MSG_STATFS_REPLY:
		return get_generic_reply(con, hdr, skip);
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msg_get(monc->m_auth_reply);
		break;
	case CEPH_MSG_MON_GET_VERSION_REPLY:
		if (le64_to_cpu(hdr->tid) != 0)
			return get_generic_reply(con, hdr, skip);

		/*
		 * Older OSDs don't set reply tid even if the original
		 * request had a non-zero tid.  Work around this weirdness
		 * by falling through to the allocate case.
		 */
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!m)
			return NULL;	/* ENOMEM--return skip == 0 */
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d\n", type);
		*skip = 1;
	} else if (front_len > m->front_alloc_len) {
		pr_warn("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
			front_len, m->front_alloc_len,
			(unsigned int)con->peer_name.type,
			le64_to_cpu(con->peer_name.num));
		ceph_msg_put(m);
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
	}

	return m;
}

/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);

	if (!monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			ceph_pr_addr(&monc->con.peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting */
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}

	mutex_unlock(&monc->mutex);
}

/*
 * We can ignore refcounting on the connection struct, as all references
 * will come from the messenger workqueue, which is drained prior to
 * mon_client destruction.
 */
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	return con;
}

static void con_put(struct ceph_connection *con)
{
}

static const struct ceph_connection_operations mon_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};