#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>

#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>

/*
 * Interact with the Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" is responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of ceph-mon daemons use a modified version of the Paxos part-time
 * parliament algorithm to manage the MDS map (mds cluster membership),
 * the OSD map, and the list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order
 * to receive timely MDSMap updates.  We periodically send a keepalive byte
 * on the TCP socket to ensure we detect a failure.  If the connection does
 * break, we randomly hunt for a new monitor.  Once the connection is
 * reestablished, we resend any outstanding requests.
 */

static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);

/*
 * Decode a monmap blob (e.g., during mount).
 */
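/*
 * The blob decoded below is length-prefixed: u32 len, u16 version,
 * the fsid, u32 epoch and u32 num_mon, followed by num_mon mon_inst
 * entries whose addresses are fixed up with ceph_decode_addr().
 */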
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
	struct ceph_monmap *m = NULL;
	int i, err = -EINVAL;
	struct ceph_fsid fsid;
	u32 epoch, num_mon;
	u16 version;
	u32 len;

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);

	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	m->fsid = fsid;
	m->epoch = epoch;
	m->num_mon = num_mon;
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
	for (i = 0; i < num_mon; i++)
		ceph_decode_addr(&m->mon_inst[i].addr);

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
	     m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode mon%d is %s\n", i,
		     ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
	kfree(m);
	return ERR_PTR(err);
}

/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
	int i;

	for (i = 0; i < m->num_mon; i++)
		if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
			return 1;
	return 0;
}

/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
	monc->pending_auth = 1;
	monc->m_auth->front.iov_len = len;
	monc->m_auth->hdr.front_len = cpu_to_le32(len);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_get(monc->m_auth); /* keep our ref */
	ceph_con_send(&monc->con, monc->m_auth);
}

/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
	dout("__close_session closing mon%d\n", monc->cur_mon);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_revoke_incoming(monc->m_auth_reply);
	ceph_msg_revoke(monc->m_subscribe);
	ceph_msg_revoke_incoming(monc->m_subscribe_ack);
	ceph_con_close(&monc->con);

	monc->pending_auth = 0;
	ceph_auth_reset(monc->auth);
}

/*
 * Pick a new monitor at random and set cur_mon.  If we are repicking
 * (i.e. cur_mon is already set), be sure to pick a different one.
 */
static void pick_new_mon(struct ceph_mon_client *monc)
{
	int old_mon = monc->cur_mon;

	BUG_ON(monc->monmap->num_mon < 1);

	if (monc->monmap->num_mon == 1) {
		monc->cur_mon = 0;
	} else {
		int max = monc->monmap->num_mon;
		int o = -1;
		int n;

		if (monc->cur_mon >= 0) {
			if (monc->cur_mon < monc->monmap->num_mon)
				o = monc->cur_mon;
			if (o >= 0)
				max--;
		}

		n = prandom_u32() % max;
		if (o >= 0 && n >= o)
			n++;
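		/*
		 * Worked example: num_mon == 3, o == 1.  n is drawn
		 * from {0, 1} and values >= 1 are bumped by one, so the
		 * result is uniform over {0, 2} and never the current
		 * monitor.
		 */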

		monc->cur_mon = n;
	}

	dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
	     monc->cur_mon, monc->monmap->num_mon);
}

/*
 * Open a session with a new monitor.
 */
static void __open_session(struct ceph_mon_client *monc)
{
	int ret;

	pick_new_mon(monc);

	monc->hunting = true;
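	/* exponential backoff, but only once we have connected before */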
	if (monc->had_a_connection) {
		monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF;
		if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT)
			monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT;
	}

	monc->sub_renew_after = jiffies; /* i.e., expired */
	monc->sub_renew_sent = 0;

	dout("%s opening mon%d\n", __func__, monc->cur_mon);
	ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
		      &monc->monmap->mon_inst[monc->cur_mon].addr);

	/*
	 * send an initial keepalive to ensure our timestamp is valid
	 * by the time we are in an OPENED state
	 */
	ceph_con_keepalive(&monc->con);

	/* initiate authentication handshake */
	ret = ceph_auth_build_hello(monc->auth,
				    monc->m_auth->front.iov_base,
				    monc->m_auth->front_alloc_len);
	BUG_ON(ret <= 0);
	__send_prepared_auth_request(monc, ret);
}

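/*
 * Close the current session (if any) and open a new one with a
 * freshly picked monitor.
 */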
static void reopen_session(struct ceph_mon_client *monc)
{
	if (!monc->hunting)
		pr_info("mon%d %s session lost, hunting for new mon\n",
		    monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr.in_addr));

	__close_session(monc);
	__open_session(monc);
}

/*
 * Reschedule delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned long delay;

	if (monc->hunting)
		delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;
	else
		delay = CEPH_MONC_PING_INTERVAL;

	dout("__schedule_delayed after %lu\n", delay);
	mod_delayed_work(system_wq, &monc->delayed_work,
			 round_jiffies_relative(delay));
}

const char *ceph_sub_str[] = {
	[CEPH_SUB_MONMAP] = "monmap",
	[CEPH_SUB_OSDMAP] = "osdmap",
	[CEPH_SUB_FSMAP]  = "fsmap.user",
	[CEPH_SUB_MDSMAP] = "mdsmap",
};

/*
 * Send subscribe request for one or more maps, according to
 * monc->subs.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
{
	struct ceph_msg *msg = monc->m_subscribe;
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	int num = 0;
	int i;

	dout("%s sent %lu\n", __func__, monc->sub_renew_sent);

	BUG_ON(monc->cur_mon < 0);

	if (!monc->sub_renew_sent)
		monc->sub_renew_sent = jiffies | 1; /* never 0 */

	msg->hdr.version = cpu_to_le16(2);

	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		if (monc->subs[i].want)
			num++;
	}
	BUG_ON(num < 1); /* monmap sub is always there */
	ceph_encode_32(&p, num);
	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		char buf[32];
		int len;

		if (!monc->subs[i].want)
			continue;

		len = sprintf(buf, "%s", ceph_sub_str[i]);
		if (i == CEPH_SUB_MDSMAP &&
		    monc->fs_cluster_id != CEPH_FS_CLUSTER_ID_NONE)
			len += sprintf(buf + len, ".%d", monc->fs_cluster_id);

		dout("%s %s start %llu flags 0x%x\n", __func__, buf,
		     le64_to_cpu(monc->subs[i].item.start),
		     monc->subs[i].item.flags);
		ceph_encode_string(&p, end, buf, len);
		memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
		p += sizeof(monc->subs[i].item);
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_msg_revoke(msg);
	ceph_con_send(&monc->con, ceph_msg_get(msg));
}

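/*
 * Handle the mon's ack for a subscribe request: schedule renewal for
 * halfway through the granted duration.
 */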
static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned int seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->sub_renew_sent) {
		monc->sub_renew_after = monc->sub_renew_sent +
					(seconds >> 1) * HZ - 1;
		dout("%s sent %lu duration %d renew after %lu\n", __func__,
		     monc->sub_renew_sent, seconds, monc->sub_renew_after);
		monc->sub_renew_sent = 0;
	} else {
		dout("%s sent %lu renew after %lu, ignoring\n", __func__,
		     monc->sub_renew_sent, monc->sub_renew_after);
	}
	mutex_unlock(&monc->mutex);
	return;
bad:
	pr_err("got corrupt subscribe-ack msg\n");
	ceph_msg_dump(msg);
}

/*
 * Register interest in a map
 *
 * @sub: one of CEPH_SUB_*
 * @epoch: X for "every map since X", or 0 for "just the latest"
 */
static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub,
				 u32 epoch, bool continuous)
{
	__le64 start = cpu_to_le64(epoch);
	u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0;

	dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub],
	     epoch, continuous);

	if (monc->subs[sub].want &&
	    monc->subs[sub].item.start == start &&
	    monc->subs[sub].item.flags == flags)
		return false;

	monc->subs[sub].item.start = start;
	monc->subs[sub].item.flags = flags;
	monc->subs[sub].want = true;

	return true;
}

bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
			bool continuous)
{
	bool need_request;

	mutex_lock(&monc->mutex);
	need_request = __ceph_monc_want_map(monc, sub, epoch, continuous);
	mutex_unlock(&monc->mutex);

	return need_request;
}
EXPORT_SYMBOL(ceph_monc_want_map);

/*
 * Keep track of which maps we have
 *
 * @sub: one of CEPH_SUB_*
 */
static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub,
				u32 epoch)
{
	dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch);

	if (monc->subs[sub].want) {
		if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)
			monc->subs[sub].want = false;
		else
			monc->subs[sub].item.start = cpu_to_le64(epoch + 1);
	}

	monc->subs[sub].have = epoch;
}

void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
{
	mutex_lock(&monc->mutex);
	__ceph_monc_got_map(monc, sub, epoch);
	mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_got_map);

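/*
 * Immediately (re)send the current set of subscriptions.
 */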
void ceph_monc_renew_subs(struct ceph_mon_client *monc)
{
	mutex_lock(&monc->mutex);
	__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_renew_subs);

/*
 * Wait for an osdmap with a given epoch.
 *
 * @epoch: epoch to wait for
 * @timeout: in jiffies, 0 means "wait forever"
 */
int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
			  unsigned long timeout)
{
	unsigned long started = jiffies;
	long ret;

	mutex_lock(&monc->mutex);
	while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) {
		mutex_unlock(&monc->mutex);

		if (timeout && time_after_eq(jiffies, started + timeout))
			return -ETIMEDOUT;

		ret = wait_event_interruptible_timeout(monc->client->auth_wq,
				monc->subs[CEPH_SUB_OSDMAP].have >= epoch,
				ceph_timeout_jiffies(timeout));
		if (ret < 0)
			return ret;

		mutex_lock(&monc->mutex);
	}

	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_wait_osdmap);

/*
 * Open a session with a random monitor.  Request monmap and osdmap,
 * which are waited upon in __ceph_open_session().
 */
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
	mutex_lock(&monc->mutex);
	__ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
	__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
	__open_session(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_open_session);

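/*
 * Handle an incoming monmap: decode it, verify the fsid and swap it
 * in for the old copy.
 */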
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
	void *p, *end;

	mutex_lock(&monc->mutex);

	dout("handle_monmap\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	monmap = ceph_monmap_decode(p, end);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		goto out;
	}

	if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
		kfree(monmap);
		goto out;
	}

	client->monc.monmap = monmap;
	kfree(old);

	__ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
	client->have_fsid = true;

out:
	mutex_unlock(&monc->mutex);
	wake_up_all(&client->auth_wq);
}

/*
 * generic requests (currently statfs, mon_get_version)
 */
DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node)

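/*
 * Generic requests are refcounted: registering in the tid tree takes
 * a ref (dropped by complete_generic_request() or
 * finish_generic_request()), and the caller holds its own ref from
 * alloc_generic_request() until put_generic_request().
 */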
static void release_generic_request(struct kref *kref)
{
	struct ceph_mon_generic_request *req =
		container_of(kref, struct ceph_mon_generic_request, kref);

	dout("%s greq %p request %p reply %p\n", __func__, req, req->request,
	     req->reply);
	WARN_ON(!RB_EMPTY_NODE(&req->node));

	if (req->reply)
		ceph_msg_put(req->reply);
	if (req->request)
		ceph_msg_put(req->request);

	kfree(req);
}

static void put_generic_request(struct ceph_mon_generic_request *req)
{
	if (req)
		kref_put(&req->kref, release_generic_request);
}

static void get_generic_request(struct ceph_mon_generic_request *req)
{
	kref_get(&req->kref);
}

static struct ceph_mon_generic_request *
alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp)
{
	struct ceph_mon_generic_request *req;

	req = kzalloc(sizeof(*req), gfp);
	if (!req)
		return NULL;

	req->monc = monc;
	kref_init(&req->kref);
	RB_CLEAR_NODE(&req->node);
	init_completion(&req->completion);

	dout("%s greq %p\n", __func__, req);
	return req;
}

static void register_generic_request(struct ceph_mon_generic_request *req)
{
	struct ceph_mon_client *monc = req->monc;

	WARN_ON(req->tid);

	get_generic_request(req);
	req->tid = ++monc->last_tid;
	insert_generic_request(&monc->generic_request_tree, req);
}

static void send_generic_request(struct ceph_mon_client *monc,
				 struct ceph_mon_generic_request *req)
{
	WARN_ON(!req->tid);

	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
	req->request->hdr.tid = cpu_to_le64(req->tid);
	ceph_con_send(&monc->con, ceph_msg_get(req->request));
}

static void __finish_generic_request(struct ceph_mon_generic_request *req)
{
	struct ceph_mon_client *monc = req->monc;

	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
	erase_generic_request(&monc->generic_request_tree, req);

	ceph_msg_revoke(req->request);
	ceph_msg_revoke_incoming(req->reply);
}

static void finish_generic_request(struct ceph_mon_generic_request *req)
{
	__finish_generic_request(req);
	put_generic_request(req);
}

static void complete_generic_request(struct ceph_mon_generic_request *req)
{
	if (req->complete_cb)
		req->complete_cb(req);
	else
		complete_all(&req->completion);
	put_generic_request(req);
}

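/*
 * Unregister a request that is still in the tid tree, e.g. after an
 * interrupted wait.
 */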
static void cancel_generic_request(struct ceph_mon_generic_request *req)
{
	struct ceph_mon_client *monc = req->monc;
	struct ceph_mon_generic_request *lookup_req;

	dout("%s greq %p tid %llu\n", __func__, req, req->tid);

	mutex_lock(&monc->mutex);
	lookup_req = lookup_generic_request(&monc->generic_request_tree,
					    req->tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		finish_generic_request(req);
	}

	mutex_unlock(&monc->mutex);
}

static int wait_generic_request(struct ceph_mon_generic_request *req)
{
	int ret;

	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
	ret = wait_for_completion_interruptible(&req->completion);
	if (ret)
		cancel_generic_request(req);
	else
		ret = req->result; /* completed */

	return ret;
}

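/*
 * Hand the preallocated reply buffer for this tid to the messenger,
 * or skip the message if the request is no longer registered.
 */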
static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
					  struct ceph_msg_header *hdr,
					  int *skip)
{
	struct ceph_mon_client *monc = con->private;
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(hdr->tid);
	struct ceph_msg *m;

	mutex_lock(&monc->mutex);
	req = lookup_generic_request(&monc->generic_request_tree, tid);
	if (!req) {
		dout("get_generic_reply %lld dne\n", tid);
		*skip = 1;
		m = NULL;
	} else {
		dout("get_generic_reply %lld got %p\n", tid, req->reply);
		*skip = 0;
		m = ceph_msg_get(req->reply);
		/*
		 * we don't need to track the connection reading into
		 * this reply because we only have one open connection
		 * at a time, ever.
		 */
	}
	mutex_unlock(&monc->mutex);
	return m;
}

/*
 * statfs
 */
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;

	mutex_lock(&monc->mutex);
	req = lookup_generic_request(&monc->generic_request_tree, tid);
	if (!req) {
		mutex_unlock(&monc->mutex);
		return;
	}

	req->result = 0;
	*req->u.st = reply->st; /* struct */
	__finish_generic_request(req);
	mutex_unlock(&monc->mutex);

	complete_generic_request(req);
	return;

bad:
	pr_err("corrupt statfs reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs *h;
	int ret = -ENOMEM;

	req = alloc_generic_request(monc, GFP_NOFS);
	if (!req)
		goto out;

	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
				    true);
	if (!req->request)
		goto out;

	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true);
	if (!req->reply)
		goto out;

	req->u.st = buf;

	mutex_lock(&monc->mutex);
	register_generic_request(req);
	/* fill out request */
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;
	send_generic_request(monc, req);
	mutex_unlock(&monc->mutex);

	ret = wait_generic_request(req);
out:
	put_generic_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);

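/*
 * Handle a mon_get_version reply: the handle in the payload echoes
 * the tid of the original request.
 */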
static void handle_get_version_reply(struct ceph_mon_client *monc,
				     struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	void *p = msg->front.iov_base;
	void *end = p + msg->front_alloc_len;
	u64 handle;

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	ceph_decode_need(&p, end, 2*sizeof(u64), bad);
	handle = ceph_decode_64(&p);
	if (tid != 0 && tid != handle)
		goto bad;

	mutex_lock(&monc->mutex);
	req = lookup_generic_request(&monc->generic_request_tree, handle);
	if (!req) {
		mutex_unlock(&monc->mutex);
		return;
	}

	req->result = 0;
	req->u.newest = ceph_decode_64(&p);
	__finish_generic_request(req);
	mutex_unlock(&monc->mutex);

	complete_generic_request(req);
	return;

bad:
	pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

static struct ceph_mon_generic_request *
__ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
			ceph_monc_callback_t cb, u64 private_data)
{
	struct ceph_mon_generic_request *req;

	req = alloc_generic_request(monc, GFP_NOIO);
	if (!req)
		goto err_put_req;

	req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
				    sizeof(u64) + sizeof(u32) + strlen(what),
				    GFP_NOIO, true);
	if (!req->request)
		goto err_put_req;

	req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO,
				  true);
	if (!req->reply)
		goto err_put_req;

	req->complete_cb = cb;
	req->private_data = private_data;

	mutex_lock(&monc->mutex);
	register_generic_request(req);
	{
		void *p = req->request->front.iov_base;
		void *const end = p + req->request->front_alloc_len;

		ceph_encode_64(&p, req->tid); /* handle */
		ceph_encode_string(&p, end, what, strlen(what));
		WARN_ON(p != end);
	}
	send_generic_request(monc, req);
	mutex_unlock(&monc->mutex);

	return req;

err_put_req:
	put_generic_request(req);
	return ERR_PTR(-ENOMEM);
}

/*
 * Send MMonGetVersion and wait for the reply.
 *
 * @what: one of "mdsmap", "osdmap" or "monmap"
 */
int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
			  u64 *newest)
{
	struct ceph_mon_generic_request *req;
	int ret;

	req = __ceph_monc_get_version(monc, what, NULL, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = wait_generic_request(req);
	if (!ret)
		*newest = req->u.newest;

	put_generic_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_monc_get_version);

/*
 * Send MMonGetVersion; @cb will be invoked with the result when the
 * reply arrives.
 *
 * @what: one of "mdsmap", "osdmap" or "monmap"
 */
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
				ceph_monc_callback_t cb, u64 private_data)
{
	struct ceph_mon_generic_request *req;

	req = __ceph_monc_get_version(monc, what, cb, private_data);
	if (IS_ERR(req))
		return PTR_ERR(req);

	put_generic_request(req);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_get_version_async);

/*
 * Resend pending generic requests.
 */
static void __resend_generic_request(struct ceph_mon_client *monc)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *p;

	for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mon_generic_request, node);
		ceph_msg_revoke(req->request);
		ceph_msg_revoke_incoming(req->reply);
		ceph_con_send(&monc->con, ceph_msg_get(req->request));
	}
}

/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		dout("%s continuing hunt\n", __func__);
		reopen_session(monc);
	} else {
		int is_auth = ceph_auth_is_authenticated(monc->auth);

		if (ceph_con_keepalive_expired(&monc->con,
					       CEPH_MONC_PING_TIMEOUT)) {
			dout("monc keepalive timeout\n");
			is_auth = 0;
			reopen_session(monc);
		}

		if (!monc->hunting) {
			ceph_con_keepalive(&monc->con);
			__validate_auth(monc);
		}

		if (is_auth) {
			unsigned long now = jiffies;

			dout("%s renew subs? now %lu renew after %lu\n",
			     __func__, now, monc->sub_renew_after);
			if (time_after_eq(now, monc->sub_renew_after))
				__send_subscribe(monc);
		}
	}
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
}

/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
	struct ceph_options *opt = monc->client->options;
	struct ceph_entity_addr *mon_addr = opt->mon_addr;
	int num_mon = opt->num_mon;
	int i;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (!monc->monmap)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type =
			CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;
	return 0;
}

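/*
 * Initialize the mon_client: auth context, preallocated messages and
 * the embedded connection.  The session itself is opened later via
 * ceph_monc_open_session().
 */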
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
	int err = 0;

	dout("init\n");
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	monc->monmap = NULL;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);
	if (err)
		goto out;

	/* authentication */
	monc->auth = ceph_auth_init(cl->options->name,
				    cl->options->key);
	if (IS_ERR(monc->auth)) {
		err = PTR_ERR(monc->auth);
		goto out_monmap;
	}
	monc->auth->want_keys =
		CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
		CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;

	/* msgs */
	err = -ENOMEM;
	monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
				     sizeof(struct ceph_mon_subscribe_ack),
				     GFP_NOFS, true);
	if (!monc->m_subscribe_ack)
		goto out_auth;

	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128, GFP_NOFS,
					 true);
	if (!monc->m_subscribe)
		goto out_subscribe_ack;

	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
					  true);
	if (!monc->m_auth_reply)
		goto out_subscribe;

	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
	monc->pending_auth = 0;
	if (!monc->m_auth)
		goto out_auth_reply;

	ceph_con_init(&monc->con, monc, &mon_con_ops,
		      &monc->client->msgr);

	monc->cur_mon = -1;
	monc->had_a_connection = false;
	monc->hunt_mult = 1;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	monc->generic_request_tree = RB_ROOT;
	monc->last_tid = 0;

	monc->fs_cluster_id = CEPH_FS_CLUSTER_ID_NONE;

	return 0;

out_auth_reply:
	ceph_msg_put(monc->m_auth_reply);
out_subscribe:
	ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
	ceph_msg_put(monc->m_subscribe_ack);
out_auth:
	ceph_auth_destroy(monc->auth);
out_monmap:
	kfree(monc->monmap);
out:
	return err;
}
EXPORT_SYMBOL(ceph_monc_init);

void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	monc->cur_mon = -1;
	mutex_unlock(&monc->mutex);

	/*
	 * flush msgr queue before we destroy ourselves to ensure that:
	 *  - any work that references our embedded con is finished.
	 *  - any osd_client or other work that may reference an
	 *    authorizer finishes before we shut down the auth subsystem.
	 */
	ceph_msgr_flush();

	ceph_auth_destroy(monc->auth);

	WARN_ON(!RB_EMPTY_ROOT(&monc->generic_request_tree));

	ceph_msg_put(monc->m_auth);
	ceph_msg_put(monc->m_auth_reply);
	ceph_msg_put(monc->m_subscribe);
	ceph_msg_put(monc->m_subscribe_ack);

	kfree(monc->monmap);
}
EXPORT_SYMBOL(ceph_monc_stop);

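/*
 * Leave hunting mode: we have a live connection to a monitor again.
 * Decay the hunt backoff multiplier.
 */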
static void finish_hunting(struct ceph_mon_client *monc)
{
	if (monc->hunting) {
		dout("%s found mon%d\n", __func__, monc->cur_mon);
		monc->hunting = false;
		monc->had_a_connection = true;
		monc->hunt_mult /= 2; /* reduce by 50% */
		if (monc->hunt_mult < 1)
			monc->hunt_mult = 1;
	}
}

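/*
 * Handle the mon's auth reply: either continue the handshake with
 * another auth request (ret > 0), or finish hunting and, on a fresh
 * authentication, (re)establish session state.
 */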
static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;
	int was_auth = 0;

	mutex_lock(&monc->mutex);
	was_auth = ceph_auth_is_authenticated(monc->auth);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_alloc_len);
	if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
		goto out;
	}

	finish_hunting(monc);

	if (ret < 0) {
		monc->client->auth_err = ret;
	} else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");

		monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr.inst.name.num =
					cpu_to_le64(monc->auth->global_id);

		__send_subscribe(monc);
		__resend_generic_request(monc);

		pr_info("mon%d %s session established\n", monc->cur_mon,
			ceph_pr_addr(&monc->con.peer_addr.in_addr));
	}

out:
	mutex_unlock(&monc->mutex);
	if (monc->client->auth_err < 0)
		wake_up_all(&monc->client->auth_wq);
}

static int __validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	if (monc->pending_auth)
		return 0;

	ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
			      monc->m_auth->front_alloc_len);
	if (ret <= 0)
		return ret; /* either an error, or no need to authenticate */
	__send_prepared_auth_request(monc, ret);
	return 0;
}

int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	mutex_lock(&monc->mutex);
	ret = __validate_auth(monc);
	mutex_unlock(&monc->mutex);
	return ret;
}
EXPORT_SYMBOL(ceph_monc_validate_auth);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;

	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;

	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;

	case CEPH_MSG_MON_GET_VERSION_REPLY:
		handle_get_version_reply(monc, msg);
		break;

	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;

	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;

	default:
		/* can the chained handler handle it? */
		if (monc->client->extra_mon_dispatch &&
		    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
			break;

		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}

/*
 * Allocate memory for incoming message
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;

	*skip = 0;

	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msg_get(monc->m_subscribe_ack);
		break;
	case CEPH_MSG_STATFS_REPLY:
		return get_generic_reply(con, hdr, skip);
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msg_get(monc->m_auth_reply);
		break;
	case CEPH_MSG_MON_GET_VERSION_REPLY:
		if (le64_to_cpu(hdr->tid) != 0)
			return get_generic_reply(con, hdr, skip);

		/*
		 * Older OSDs don't set reply tid even if the original
		 * request had a non-zero tid.  Work around this weirdness
		 * by falling through to the allocate case.
		 */
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_FS_MAP_USER:
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!m)
			return NULL;	/* ENOMEM--return skip == 0 */
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d\n", type);
		*skip = 1;
	} else if (front_len > m->front_alloc_len) {
		pr_warn("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
			front_len, m->front_alloc_len,
			(unsigned int)con->peer_name.type,
			le64_to_cpu(con->peer_name.num));
		ceph_msg_put(m);
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
	}

	return m;
}

/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	mutex_lock(&monc->mutex);
	dout("%s mon%d\n", __func__, monc->cur_mon);
	if (monc->cur_mon >= 0) {
		if (!monc->hunting) {
			dout("%s hunting for new mon\n", __func__);
			reopen_session(monc);
			__schedule_delayed(monc);
		} else {
			dout("%s already hunting\n", __func__);
		}
	}
	mutex_unlock(&monc->mutex);
}

/*
 * We can ignore refcounting on the connection struct, as all references
 * will come from the messenger workqueue, which is drained prior to
 * mon_client destruction.
 */
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	return con;
}

static void con_put(struct ceph_connection *con)
{
}

static const struct ceph_connection_operations mon_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};