#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>

#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>

/*
 * Interact with Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" are responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of cmon daemons use a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
 * list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order to
 * receive timely MDSMap updates.  We periodically send a keepalive byte on the
 * TCP socket to ensure we detect a failure.  If the connection does break, we
 * randomly hunt for a new monitor.  Once the connection is reestablished, we
 * resend any outstanding requests.
 */

static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);

/*
 * Decode a monmap blob (e.g., during mount).
 */
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
	struct ceph_monmap *m = NULL;
	int i, err = -EINVAL;
	struct ceph_fsid fsid;
	u32 epoch, num_mon;
	u16 version;
	u32 len;

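	/*
	 * The monmap blob is a u32 payload length, a u16 encoding
	 * version, the cluster fsid, a u32 epoch, a u32 monitor count,
	 * and then num_mon mon_inst entries, decoded below in that
	 * order.
	 */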
	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);

	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	m->fsid = fsid;
	m->epoch = epoch;
	m->num_mon = num_mon;
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
	for (i = 0; i < num_mon; i++)
		ceph_decode_addr(&m->mon_inst[i].addr);

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
	     m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode mon%d is %s\n", i,
		     ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
	kfree(m);
	return ERR_PTR(err);
}

/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
	int i;

	for (i = 0; i < m->num_mon; i++)
		if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
			return 1;
	return 0;
}

/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
	monc->pending_auth = 1;
	monc->m_auth->front.iov_len = len;
	monc->m_auth->hdr.front_len = cpu_to_le32(len);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_get(monc->m_auth); /* keep our ref */
	ceph_con_send(&monc->con, monc->m_auth);
}

/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
	dout("__close_session closing mon%d\n", monc->cur_mon);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_revoke_incoming(monc->m_auth_reply);
	ceph_msg_revoke(monc->m_subscribe);
	ceph_msg_revoke_incoming(monc->m_subscribe_ack);
	ceph_con_close(&monc->con);

	monc->pending_auth = 0;
	ceph_auth_reset(monc->auth);
}

/*
 * Pick a new monitor at random and set cur_mon.  If we are repicking
 * (i.e. cur_mon is already set), be sure to pick a different one.
 */
static void pick_new_mon(struct ceph_mon_client *monc)
{
	int old_mon = monc->cur_mon;

	BUG_ON(monc->monmap->num_mon < 1);

	if (monc->monmap->num_mon == 1) {
		monc->cur_mon = 0;
	} else {
		int max = monc->monmap->num_mon;
		int o = -1;
		int n;

		if (monc->cur_mon >= 0) {
			if (monc->cur_mon < monc->monmap->num_mon)
				o = monc->cur_mon;
			if (o >= 0)
				max--;
		}

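		/*
		 * Pick uniformly from [0, max), where max excludes the
		 * currently selected monitor, then shift the result past
		 * the old index so we never re-pick the same monitor.
		 */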
		n = prandom_u32() % max;
		if (o >= 0 && n >= o)
			n++;

		monc->cur_mon = n;
	}

	dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
	     monc->cur_mon, monc->monmap->num_mon);
}

/*
 * Open a session with a new monitor.
 */
static void __open_session(struct ceph_mon_client *monc)
{
	int ret;

	pick_new_mon(monc);

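	/*
	 * After the first successful connection, back off the hunt
	 * delay exponentially (capped at CEPH_MONC_HUNT_MAX_MULT);
	 * __schedule_delayed() multiplies the base interval by
	 * hunt_mult while hunting.
	 */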
	if (monc->had_a_connection) {
		monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF;
		if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT)
			monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT;
	}

	monc->sub_renew_after = jiffies; /* i.e., expired */
	monc->sub_renew_sent = 0;

	dout("%s opening mon%d\n", __func__, monc->cur_mon);
	ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
		      &monc->monmap->mon_inst[monc->cur_mon].addr);

	/*
	 * send an initial keepalive to ensure our timestamp is valid
	 * by the time we are in an OPENED state
	 */
	ceph_con_keepalive(&monc->con);

	/* initiate authentication handshake */
	ret = ceph_auth_build_hello(monc->auth,
				    monc->m_auth->front.iov_base,
				    monc->m_auth->front_alloc_len);
	BUG_ON(ret <= 0);
	__send_prepared_auth_request(monc, ret);
}

/*
 * Reschedule delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned long delay;

	if (monc->hunting)
		delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;
	else
		delay = CEPH_MONC_PING_INTERVAL;

	dout("__schedule_delayed after %lu\n", delay);
	schedule_delayed_work(&monc->delayed_work,
			      round_jiffies_relative(delay));
}

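/*
 * monc->subs[] is indexed by the CEPH_SUB_* constants below; each entry
 * records whether we currently want that map, the subscribe item (start
 * epoch and flags) to send, and the newest epoch we already have.
 */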
const char *ceph_sub_str[] = {
	[CEPH_SUB_MDSMAP] = "mdsmap",
	[CEPH_SUB_MONMAP] = "monmap",
	[CEPH_SUB_OSDMAP] = "osdmap",
};

/*
 * Send subscribe request for one or more maps, according to
 * monc->subs.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
{
	struct ceph_msg *msg = monc->m_subscribe;
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	int num = 0;
	int i;

	dout("%s sent %lu\n", __func__, monc->sub_renew_sent);

	BUG_ON(monc->cur_mon < 0);

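	/*
	 * sub_renew_sent == 0 means no subscribe is outstanding, so OR
	 * in 1 to keep the recorded send time nonzero even if jiffies
	 * happens to be 0.
	 */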
	if (!monc->sub_renew_sent)
		monc->sub_renew_sent = jiffies | 1; /* never 0 */

	msg->hdr.version = cpu_to_le16(2);

	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		if (monc->subs[i].want)
			num++;
	}
	BUG_ON(num < 1); /* monmap sub is always there */
	ceph_encode_32(&p, num);
	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		const char *s = ceph_sub_str[i];

		if (!monc->subs[i].want)
			continue;

		dout("%s %s start %llu flags 0x%x\n", __func__, s,
		     le64_to_cpu(monc->subs[i].item.start),
		     monc->subs[i].item.flags);
		ceph_encode_string(&p, end, s, strlen(s));
		memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
		p += sizeof(monc->subs[i].item);
	}

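	/*
	 * Each encoded sub takes 19 bytes (u32 name length, 6-byte map
	 * name, 9-byte subscribe item); 35 bytes of the 96-byte
	 * m_subscribe buffer remain when every entry is present, which
	 * is what this length check verifies.
	 */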
	BUG_ON(p != (end - 35 - (ARRAY_SIZE(monc->subs) - num) * 19));
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_msg_revoke(msg);
	ceph_con_send(&monc->con, ceph_msg_get(msg));
}

static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned int seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
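	/*
	 * Renew at roughly half of the granted duration so the renewal
	 * completes well before the monitor expires the subscription.
	 */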
	if (monc->sub_renew_sent) {
		monc->sub_renew_after = monc->sub_renew_sent +
					    (seconds >> 1) * HZ - 1;
		dout("%s sent %lu duration %d renew after %lu\n", __func__,
		     monc->sub_renew_sent, seconds, monc->sub_renew_after);
		monc->sub_renew_sent = 0;
	} else {
		dout("%s sent %lu renew after %lu, ignoring\n", __func__,
		     monc->sub_renew_sent, monc->sub_renew_after);
	}
	mutex_unlock(&monc->mutex);
	return;
bad:
	pr_err("got corrupt subscribe-ack msg\n");
	ceph_msg_dump(msg);
}

/*
 * Register interest in a map
 *
 * @sub: one of CEPH_SUB_*
 * @epoch: X for "every map since X", or 0 for "just the latest"
 */
static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub,
				 u32 epoch, bool continuous)
{
	__le64 start = cpu_to_le64(epoch);
	u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0;

	dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub],
	     epoch, continuous);

	if (monc->subs[sub].want &&
	    monc->subs[sub].item.start == start &&
	    monc->subs[sub].item.flags == flags)
		return false;

	monc->subs[sub].item.start = start;
	monc->subs[sub].item.flags = flags;
	monc->subs[sub].want = true;

	return true;
}

bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
			bool continuous)
{
	bool need_request;

	mutex_lock(&monc->mutex);
	need_request = __ceph_monc_want_map(monc, sub, epoch, continuous);
	mutex_unlock(&monc->mutex);

	return need_request;
}
EXPORT_SYMBOL(ceph_monc_want_map);

/*
 * Keep track of which maps we have
 *
 * @sub: one of CEPH_SUB_*
 */
static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub,
				u32 epoch)
{
	dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch);

	if (monc->subs[sub].want) {
		if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)
			monc->subs[sub].want = false;
		else
			monc->subs[sub].item.start = cpu_to_le64(epoch + 1);
	}

	monc->subs[sub].have = epoch;
}

void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
{
	mutex_lock(&monc->mutex);
	__ceph_monc_got_map(monc, sub, epoch);
	mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_got_map);

/*
 * Register interest in the next osdmap
 */
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
{
	dout("%s have %u\n", __func__, monc->subs[CEPH_SUB_OSDMAP].have);
	mutex_lock(&monc->mutex);
	if (__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP,
				 monc->subs[CEPH_SUB_OSDMAP].have + 1, false))
		__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_request_next_osdmap);

/*
 * Wait for an osdmap with a given epoch.
 *
 * @epoch: epoch to wait for
 * @timeout: in jiffies, 0 means "wait forever"
 */
int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
			  unsigned long timeout)
{
	unsigned long started = jiffies;
	long ret;

	mutex_lock(&monc->mutex);
	while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) {
		mutex_unlock(&monc->mutex);

		if (timeout && time_after_eq(jiffies, started + timeout))
			return -ETIMEDOUT;

		ret = wait_event_interruptible_timeout(monc->client->auth_wq,
				monc->subs[CEPH_SUB_OSDMAP].have >= epoch,
				ceph_timeout_jiffies(timeout));
		if (ret < 0)
			return ret;

		mutex_lock(&monc->mutex);
	}

	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_wait_osdmap);

/*
 * Open a session with a random monitor.  Request monmap and osdmap,
 * which are waited upon in __ceph_open_session().
 */
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
	mutex_lock(&monc->mutex);
	__ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
	__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
	__open_session(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_open_session);

static void ceph_monc_handle_map(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
	void *p, *end;

	mutex_lock(&monc->mutex);

	dout("handle_monmap\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	monmap = ceph_monmap_decode(p, end);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		goto out;
	}

	if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
		kfree(monmap);
		goto out;
	}

	client->monc.monmap = monmap;
	kfree(old);

	__ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
	client->have_fsid = true;

out:
	mutex_unlock(&monc->mutex);
	wake_up_all(&client->auth_wq);
}

/*
 * generic requests (currently statfs, mon_get_version)
 */
static struct ceph_mon_generic_request *__lookup_generic_req(
	struct ceph_mon_client *monc, u64 tid)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *n = monc->generic_request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mon_generic_request, node);
		if (tid < req->tid)
			n = n->rb_left;
		else if (tid > req->tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static void __insert_generic_request(struct ceph_mon_client *monc,
			    struct ceph_mon_generic_request *new)
{
	struct rb_node **p = &monc->generic_request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mon_generic_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mon_generic_request, node);
		if (new->tid < req->tid)
			p = &(*p)->rb_left;
		else if (new->tid > req->tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &monc->generic_request_tree);
}

static void release_generic_request(struct kref *kref)
{
	struct ceph_mon_generic_request *req =
		container_of(kref, struct ceph_mon_generic_request, kref);

	if (req->reply)
		ceph_msg_put(req->reply);
	if (req->request)
		ceph_msg_put(req->request);

	kfree(req);
}

static void put_generic_request(struct ceph_mon_generic_request *req)
{
	kref_put(&req->kref, release_generic_request);
}

static void get_generic_request(struct ceph_mon_generic_request *req)
{
	kref_get(&req->kref);
}

static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
					  struct ceph_msg_header *hdr,
					  int *skip)
{
	struct ceph_mon_client *monc = con->private;
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(hdr->tid);
	struct ceph_msg *m;

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (!req) {
		dout("get_generic_reply %lld dne\n", tid);
		*skip = 1;
		m = NULL;
	} else {
		dout("get_generic_reply %lld got %p\n", tid, req->reply);
		*skip = 0;
		m = ceph_msg_get(req->reply);
		/*
		 * we don't need to track the connection reading into
		 * this reply because we only have one open connection
		 * at a time, ever.
		 */
	}
	mutex_unlock(&monc->mutex);
	return m;
}

513a8243 ID |
562 | static int __do_generic_request(struct ceph_mon_client *monc, u64 tid, |
563 | struct ceph_mon_generic_request *req) | |
e56fa10e YS |
564 | { |
565 | int err; | |
566 | ||
567 | /* register request */ | |
513a8243 | 568 | req->tid = tid != 0 ? tid : ++monc->last_tid; |
e56fa10e YS |
569 | req->request->hdr.tid = cpu_to_le64(req->tid); |
570 | __insert_generic_request(monc, req); | |
571 | monc->num_generic_requests++; | |
67130934 | 572 | ceph_con_send(&monc->con, ceph_msg_get(req->request)); |
e56fa10e YS |
573 | mutex_unlock(&monc->mutex); |
574 | ||
575 | err = wait_for_completion_interruptible(&req->completion); | |
576 | ||
577 | mutex_lock(&monc->mutex); | |
578 | rb_erase(&req->node, &monc->generic_request_tree); | |
579 | monc->num_generic_requests--; | |
e56fa10e YS |
580 | |
581 | if (!err) | |
582 | err = req->result; | |
583 | return err; | |
584 | } | |
585 | ||
513a8243 ID |
586 | static int do_generic_request(struct ceph_mon_client *monc, |
587 | struct ceph_mon_generic_request *req) | |
588 | { | |
589 | int err; | |
590 | ||
591 | mutex_lock(&monc->mutex); | |
592 | err = __do_generic_request(monc, 0, req); | |
593 | mutex_unlock(&monc->mutex); | |
594 | ||
595 | return err; | |
596 | } | |
597 | ||
/*
 * statfs
 */
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;
	dout("handle_statfs_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (req) {
		*(struct ceph_statfs *)req->buf = reply->st;
		req->result = 0;
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete_all(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt statfs reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs *h;
	int err;

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	kref_init(&req->kref);
	req->buf = buf;
	init_completion(&req->completion);

	err = -ENOMEM;
	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
				    true);
	if (!req->request)
		goto out;
	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
				  true);
	if (!req->reply)
		goto out;

	/* fill out request */
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;

	err = do_generic_request(monc, req);

out:
	put_generic_request(req);
	return err;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);

static void handle_get_version_reply(struct ceph_mon_client *monc,
				     struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	void *p = msg->front.iov_base;
	void *end = p + msg->front_alloc_len;
	u64 handle;

	dout("%s %p tid %llu\n", __func__, msg, tid);

	ceph_decode_need(&p, end, 2*sizeof(u64), bad);
	handle = ceph_decode_64(&p);
	if (tid != 0 && tid != handle)
		goto bad;

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, handle);
	if (req) {
		*(u64 *)req->buf = ceph_decode_64(&p);
		req->result = 0;
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete_all(&req->completion);
		put_generic_request(req);
	}

	return;
bad:
	pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

/*
 * Send MMonGetVersion and wait for the reply.
 *
 * @what: one of "mdsmap", "osdmap" or "monmap"
 */
int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
			     u64 *newest)
{
	struct ceph_mon_generic_request *req;
	void *p, *end;
	u64 tid;
	int err;

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	kref_init(&req->kref);
	req->buf = newest;
	init_completion(&req->completion);

	req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
				    sizeof(u64) + sizeof(u32) + strlen(what),
				    GFP_NOFS, true);
	if (!req->request) {
		err = -ENOMEM;
		goto out;
	}

	req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024,
				  GFP_NOFS, true);
	if (!req->reply) {
		err = -ENOMEM;
		goto out;
	}

	p = req->request->front.iov_base;
	end = p + req->request->front_alloc_len;

	/* fill out request */
	mutex_lock(&monc->mutex);
	tid = ++monc->last_tid;
	ceph_encode_64(&p, tid); /* handle */
	ceph_encode_string(&p, end, what, strlen(what));

	err = __do_generic_request(monc, tid, req);

	mutex_unlock(&monc->mutex);
out:
	put_generic_request(req);
	return err;
}
EXPORT_SYMBOL(ceph_monc_do_get_version);

/*
 * Resend pending generic requests.
 */
static void __resend_generic_request(struct ceph_mon_client *monc)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *p;

	for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mon_generic_request, node);
		ceph_msg_revoke(req->request);
		ceph_msg_revoke_incoming(req->reply);
		ceph_con_send(&monc->con, ceph_msg_get(req->request));
	}
}

/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		__close_session(monc);
		__open_session(monc); /* continue hunting */
	} else {
		int is_auth = ceph_auth_is_authenticated(monc->auth);
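		/*
		 * If the monitor has not answered our keepalives within
		 * CEPH_MONC_PING_TIMEOUT, treat the session as dead and
		 * start hunting for a different monitor.
		 */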
		if (ceph_con_keepalive_expired(&monc->con,
					       CEPH_MONC_PING_TIMEOUT)) {
			dout("monc keepalive timeout\n");
			is_auth = 0;
			__close_session(monc);
			monc->hunting = true;
			__open_session(monc);
		}

		if (!monc->hunting) {
			ceph_con_keepalive(&monc->con);
			__validate_auth(monc);
		}

		if (is_auth) {
			unsigned long now = jiffies;

			dout("%s renew subs? now %lu renew after %lu\n",
			     __func__, now, monc->sub_renew_after);
			if (time_after_eq(now, monc->sub_renew_after))
				__send_subscribe(monc);
		}
	}
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
}

/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
	struct ceph_options *opt = monc->client->options;
	struct ceph_entity_addr *mon_addr = opt->mon_addr;
	int num_mon = opt->num_mon;
	int i;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (!monc->monmap)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type =
			CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;
	return 0;
}

int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
	int err = 0;

	dout("init\n");
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	monc->monmap = NULL;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);
	if (err)
		goto out;

	/* connection */
	/* authentication */
	monc->auth = ceph_auth_init(cl->options->name,
				    cl->options->key);
	if (IS_ERR(monc->auth)) {
		err = PTR_ERR(monc->auth);
		goto out_monmap;
	}
	monc->auth->want_keys =
		CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
		CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;

	/* msgs */
	err = -ENOMEM;
	monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
				     sizeof(struct ceph_mon_subscribe_ack),
				     GFP_NOFS, true);
	if (!monc->m_subscribe_ack)
		goto out_auth;

	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
					 true);
	if (!monc->m_subscribe)
		goto out_subscribe_ack;

	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
					  true);
	if (!monc->m_auth_reply)
		goto out_subscribe;

	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
	monc->pending_auth = 0;
	if (!monc->m_auth)
		goto out_auth_reply;

	ceph_con_init(&monc->con, monc, &mon_con_ops,
		      &monc->client->msgr);

	monc->cur_mon = -1;
	monc->hunting = true;
	monc->sub_renew_after = jiffies;
	monc->sub_renew_sent = 0;
	monc->had_a_connection = false;
	monc->hunt_mult = 1;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	monc->generic_request_tree = RB_ROOT;
	monc->num_generic_requests = 0;
	monc->last_tid = 0;

	return 0;

out_auth_reply:
	ceph_msg_put(monc->m_auth_reply);
out_subscribe:
	ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
	ceph_msg_put(monc->m_subscribe_ack);
out_auth:
	ceph_auth_destroy(monc->auth);
out_monmap:
	kfree(monc->monmap);
out:
	return err;
}
EXPORT_SYMBOL(ceph_monc_init);

void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	monc->cur_mon = -1;
	mutex_unlock(&monc->mutex);

	/*
	 * flush msgr queue before we destroy ourselves to ensure that:
	 *  - any work that references our embedded con is finished.
	 *  - any osd_client or other work that may reference an authorizer
	 *    finishes before we shut down the auth subsystem.
	 */
	ceph_msgr_flush();

	ceph_auth_destroy(monc->auth);

	ceph_msg_put(monc->m_auth);
	ceph_msg_put(monc->m_auth_reply);
	ceph_msg_put(monc->m_subscribe);
	ceph_msg_put(monc->m_subscribe_ack);

	kfree(monc->monmap);
}
EXPORT_SYMBOL(ceph_monc_stop);

static void finish_hunting(struct ceph_mon_client *monc)
{
	if (monc->hunting) {
		dout("%s found mon%d\n", __func__, monc->cur_mon);
		monc->hunting = false;
		monc->had_a_connection = true;
		monc->hunt_mult /= 2; /* reduce by 50% */
		if (monc->hunt_mult < 1)
			monc->hunt_mult = 1;
	}
}

static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;
	int was_auth = 0;

	mutex_lock(&monc->mutex);
	was_auth = ceph_auth_is_authenticated(monc->auth);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_alloc_len);
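	/*
	 * A positive return value means another round trip is needed:
	 * ceph_handle_auth_reply() has already built the next request
	 * (of length ret) in m_auth's front buffer.
	 */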
	if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
		goto out;
	}

	finish_hunting(monc);

	if (ret < 0) {
		monc->client->auth_err = ret;
	} else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");

		monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr.inst.name.num =
					cpu_to_le64(monc->auth->global_id);

		__send_subscribe(monc);
		__resend_generic_request(monc);

		pr_info("mon%d %s session established\n", monc->cur_mon,
			ceph_pr_addr(&monc->con.peer_addr.in_addr));
	}

out:
	mutex_unlock(&monc->mutex);
	if (monc->client->auth_err < 0)
		wake_up_all(&monc->client->auth_wq);
}

static int __validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	if (monc->pending_auth)
		return 0;

	ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
			      monc->m_auth->front_alloc_len);
	if (ret <= 0)
		return ret; /* either an error, or no need to authenticate */
	__send_prepared_auth_request(monc, ret);
	return 0;
}

int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	mutex_lock(&monc->mutex);
	ret = __validate_auth(monc);
	mutex_unlock(&monc->mutex);
	return ret;
}
EXPORT_SYMBOL(ceph_monc_validate_auth);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;

	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;

	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;

	case CEPH_MSG_MON_GET_VERSION_REPLY:
		handle_get_version_reply(monc, msg);
		break;

	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;

	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;

	default:
		/* can the chained handler handle it? */
		if (monc->client->extra_mon_dispatch &&
		    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
			break;

		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}

/*
 * Allocate memory for incoming message
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;

	*skip = 0;

	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msg_get(monc->m_subscribe_ack);
		break;
	case CEPH_MSG_STATFS_REPLY:
		return get_generic_reply(con, hdr, skip);
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msg_get(monc->m_auth_reply);
		break;
	case CEPH_MSG_MON_GET_VERSION_REPLY:
		if (le64_to_cpu(hdr->tid) != 0)
			return get_generic_reply(con, hdr, skip);

		/*
		 * Older OSDs don't set the reply tid even if the original
		 * request had a non-zero tid.  Work around this weirdness
		 * by falling through to the allocate case.
		 */
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!m)
			return NULL;	/* ENOMEM--return skip == 0 */
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d\n", type);
		*skip = 1;
	} else if (front_len > m->front_alloc_len) {
		pr_warn("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
			front_len, m->front_alloc_len,
			(unsigned int)con->peer_name.type,
			le64_to_cpu(con->peer_name.num));
		ceph_msg_put(m);
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
	}

	return m;
}

/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);
	if (!con->private)
		goto out;

	if (!monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			ceph_pr_addr(&monc->con.peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting */
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
out:
	mutex_unlock(&monc->mutex);
}

/*
 * We can ignore refcounting on the connection struct, as all references
 * will come from the messenger workqueue, which is drained prior to
 * mon_client destruction.
 */
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	return con;
}

static void con_put(struct ceph_connection *con)
{
}

static const struct ceph_connection_operations mon_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};