// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
/*
 * Interact with Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" is responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of cmon daemons use a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
 * list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order to
 * receive timely MDSMap updates.  We periodically send a keepalive byte on the
 * TCP socket to ensure we detect a failure.  If the connection does break, we
 * randomly hunt for a new monitor.  Once the connection is reestablished, we
 * resend any outstanding requests.
 */
static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);

/*
 * Decode a monmap blob (e.g., during mount).
 */
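/*
 * Wire layout, as reconstructed from the decoder below (a sketch, not
 * copied from the protocol headers): a u32 payload length, a u16 struct
 * version (skipped), the 16-byte fsid, a u32 epoch, a u32 monitor count
 * (at most CEPH_MAX_MON), then one entity name + entity addr pair per
 * monitor.
 */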
static struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
        struct ceph_monmap *m = NULL;
        int i, err = -EINVAL;
        struct ceph_fsid fsid;
        u32 epoch, num_mon;
        u32 len;

        ceph_decode_32_safe(&p, end, len, bad);
        ceph_decode_need(&p, end, len, bad);

        dout("monmap_decode %p %p len %d (%d)\n", p, end, len, (int)(end-p));
        p += sizeof(u16); /* skip version */

        ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
        epoch = ceph_decode_32(&p);

        num_mon = ceph_decode_32(&p);

        if (num_mon > CEPH_MAX_MON)
                goto bad;
        m = kmalloc(struct_size(m, mon_inst, num_mon), GFP_NOFS);
        if (m == NULL)
                return ERR_PTR(-ENOMEM);
        m->fsid = fsid;
        m->epoch = epoch;
        m->num_mon = num_mon;
        for (i = 0; i < num_mon; ++i) {
                struct ceph_entity_inst *inst = &m->mon_inst[i];

                /* copy name portion */
                ceph_decode_copy_safe(&p, end, &inst->name,
                                      sizeof(inst->name), bad);
                err = ceph_decode_entity_addr(&p, end, &inst->addr);
                if (err)
                        goto bad;
        }
        dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
             m->num_mon);
        for (i = 0; i < m->num_mon; i++)
                dout("monmap_decode mon%d is %s\n", i,
                     ceph_pr_addr(&m->mon_inst[i].addr));
        return m;
bad:
        dout("monmap_decode failed with %d\n", err);
        kfree(m);
        return ERR_PTR(err);
}
/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
        int i;

        for (i = 0; i < m->num_mon; i++)
                if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
                        return 1;
        return 0;
}
/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
        monc->pending_auth = 1;
        monc->m_auth->front.iov_len = len;
        monc->m_auth->hdr.front_len = cpu_to_le32(len);
        ceph_msg_revoke(monc->m_auth);
        ceph_msg_get(monc->m_auth); /* keep our ref */
        ceph_con_send(&monc->con, monc->m_auth);
}
/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
        dout("__close_session closing mon%d\n", monc->cur_mon);
        ceph_msg_revoke(monc->m_auth);
        ceph_msg_revoke_incoming(monc->m_auth_reply);
        ceph_msg_revoke(monc->m_subscribe);
        ceph_msg_revoke_incoming(monc->m_subscribe_ack);
        ceph_con_close(&monc->con);

        monc->pending_auth = 0;
        ceph_auth_reset(monc->auth);
}
/*
 * Pick a new monitor at random and set cur_mon.  If we are repicking
 * (i.e. cur_mon is already set), be sure to pick a different one.
 */
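/*
 * Illustration (not from the original source): with num_mon == 3 and
 * cur_mon == 1, max becomes 2 and n is drawn from {0, 1}; bumping n
 * past the old index yields 0 or 2 -- never the monitor we just left.
 */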
static void pick_new_mon(struct ceph_mon_client *monc)
{
        int old_mon = monc->cur_mon;

        BUG_ON(monc->monmap->num_mon < 1);

        if (monc->monmap->num_mon == 1) {
                monc->cur_mon = 0;
        } else {
                int max = monc->monmap->num_mon;
                int o = -1;
                int n;

                if (monc->cur_mon >= 0) {
                        if (monc->cur_mon < monc->monmap->num_mon)
                                o = monc->cur_mon;
                        if (o >= 0)
                                max--;
                }

                n = prandom_u32() % max;
                if (o >= 0 && n >= o)
                        n++;

                monc->cur_mon = n;
        }

        dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
             monc->cur_mon, monc->monmap->num_mon);
}
/*
 * Open a session with a new monitor.
 */
static void __open_session(struct ceph_mon_client *monc)
{
        int ret;

        pick_new_mon(monc);

        monc->hunting = true;
        if (monc->had_a_connection) {
                monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF;
                if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT)
                        monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT;
        }

        monc->sub_renew_after = jiffies; /* i.e., expired */
        monc->sub_renew_sent = 0;

        dout("%s opening mon%d\n", __func__, monc->cur_mon);
        ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
                      &monc->monmap->mon_inst[monc->cur_mon].addr);

        /*
         * send an initial keepalive to ensure our timestamp is valid
         * by the time we are in an OPENED state
         */
        ceph_con_keepalive(&monc->con);

        /* initiate authentication handshake */
        ret = ceph_auth_build_hello(monc->auth,
                                    monc->m_auth->front.iov_base,
                                    monc->m_auth->front_alloc_len);
        BUG_ON(ret <= 0);
        __send_prepared_auth_request(monc, ret);
}
static void reopen_session(struct ceph_mon_client *monc)
{
        if (!monc->hunting)
                pr_info("mon%d %s session lost, hunting for new mon\n",
                        monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr));

        __close_session(monc);
        __open_session(monc);
}
static void un_backoff(struct ceph_mon_client *monc)
{
        monc->hunt_mult /= 2; /* reduce by 50% */
        if (monc->hunt_mult < 1)
                monc->hunt_mult = 1;
        dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
}
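/*
 * Hunt delays thus follow a sawtooth: each reconnect attempt multiplies
 * hunt_mult by CEPH_MONC_HUNT_BACKOFF in __open_session(), while every
 * healthy ping interval halves it here, so a briefly flapping session
 * backs off quickly and recovers within a few ping periods.
 */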
/*
 * Reschedule delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
        unsigned long delay;

        if (monc->hunting)
                delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;
        else
                delay = CEPH_MONC_PING_INTERVAL;

        dout("__schedule_delayed after %lu\n", delay);
        mod_delayed_work(system_wq, &monc->delayed_work,
                         round_jiffies_relative(delay));
}
const char *ceph_sub_str[] = {
        [CEPH_SUB_MONMAP] = "monmap",
        [CEPH_SUB_OSDMAP] = "osdmap",
        [CEPH_SUB_FSMAP]  = "fsmap.user",
        [CEPH_SUB_MDSMAP] = "mdsmap",
};
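/*
 * Message body sketch (v2), matching the encoding below: a u32 count of
 * subscriptions, then for each wanted sub a length-prefixed string such
 * as "osdmap" or "mdsmap.<fs_cluster_id>" followed by the raw
 * ceph_mon_subscribe_item (start epoch + flags).
 */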
/*
 * Send subscribe request for one or more maps, according to
 * monc->subs.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
{
        struct ceph_msg *msg = monc->m_subscribe;
        void *p = msg->front.iov_base;
        void *const end = p + msg->front_alloc_len;
        int num = 0;
        int i;

        dout("%s sent %lu\n", __func__, monc->sub_renew_sent);

        BUG_ON(monc->cur_mon < 0);

        if (!monc->sub_renew_sent)
                monc->sub_renew_sent = jiffies | 1; /* never 0 */

        msg->hdr.version = cpu_to_le16(2);

        for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
                if (monc->subs[i].want)
                        num++;
        }
        BUG_ON(num < 1); /* monmap sub is always there */
        ceph_encode_32(&p, num);
        for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
                char buf[32];
                int len;

                if (!monc->subs[i].want)
                        continue;

                len = sprintf(buf, "%s", ceph_sub_str[i]);
                if (i == CEPH_SUB_MDSMAP &&
                    monc->fs_cluster_id != CEPH_FS_CLUSTER_ID_NONE)
                        len += sprintf(buf + len, ".%d", monc->fs_cluster_id);

                dout("%s %s start %llu flags 0x%x\n", __func__, buf,
                     le64_to_cpu(monc->subs[i].item.start),
                     monc->subs[i].item.flags);
                ceph_encode_string(&p, end, buf, len);
                memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
                p += sizeof(monc->subs[i].item);
        }

        BUG_ON(p > end);
        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
        ceph_msg_revoke(msg);
        ceph_con_send(&monc->con, ceph_msg_get(msg));
}
static void handle_subscribe_ack(struct ceph_mon_client *monc,
                                 struct ceph_msg *msg)
{
        unsigned int seconds;
        struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

        if (msg->front.iov_len < sizeof(*h))
                goto bad;
        seconds = le32_to_cpu(h->duration);

        mutex_lock(&monc->mutex);
        if (monc->sub_renew_sent) {
                /*
                 * This is only needed for legacy (infernalis or older)
                 * MONs -- see delayed_work().
                 */
                monc->sub_renew_after = monc->sub_renew_sent +
                                        (seconds >> 1) * HZ - 1;
                dout("%s sent %lu duration %d renew after %lu\n", __func__,
                     monc->sub_renew_sent, seconds, monc->sub_renew_after);
                monc->sub_renew_sent = 0;
        } else {
                dout("%s sent %lu renew after %lu, ignoring\n", __func__,
                     monc->sub_renew_sent, monc->sub_renew_after);
        }
        mutex_unlock(&monc->mutex);
        return;
bad:
        pr_err("got corrupt subscribe-ack msg\n");
        ceph_msg_dump(msg);
}
/*
 * Register interest in a map
 *
 * @sub: one of CEPH_SUB_*
 * @epoch: X for "every map since X", or 0 for "just the latest"
 */
static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub,
                                 u32 epoch, bool continuous)
{
        __le64 start = cpu_to_le64(epoch);
        u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0;

        dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub],
             epoch, continuous);

        if (monc->subs[sub].want &&
            monc->subs[sub].item.start == start &&
            monc->subs[sub].item.flags == flags)
                return false;

        monc->subs[sub].item.start = start;
        monc->subs[sub].item.flags = flags;
        monc->subs[sub].want = true;

        return true;
}
bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
                        bool continuous)
{
        bool need_request;

        mutex_lock(&monc->mutex);
        need_request = __ceph_monc_want_map(monc, sub, epoch, continuous);
        mutex_unlock(&monc->mutex);

        return need_request;
}
EXPORT_SYMBOL(ceph_monc_want_map);
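/*
 * Typical caller pattern (a sketch; compare the osdmap handling in
 * osd_client.c): only renew the subscription when the wanted range
 * actually changed:
 *
 *      if (ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, epoch + 1, false))
 *              ceph_monc_renew_subs(monc);
 */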
/*
 * Keep track of which maps we have
 *
 * @sub: one of CEPH_SUB_*
 */
static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub,
                                u32 epoch)
{
        dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch);

        if (monc->subs[sub].want) {
                if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)
                        monc->subs[sub].want = false;
                else
                        monc->subs[sub].item.start = cpu_to_le64(epoch + 1);
        }

        monc->subs[sub].have = epoch;
}
void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
{
        mutex_lock(&monc->mutex);
        __ceph_monc_got_map(monc, sub, epoch);
        mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_got_map);
void ceph_monc_renew_subs(struct ceph_mon_client *monc)
{
        mutex_lock(&monc->mutex);
        __send_subscribe(monc);
        mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_renew_subs);
/*
 * Wait for an osdmap with a given epoch.
 *
 * @epoch: epoch to wait for
 * @timeout: in jiffies, 0 means "wait forever"
 */
int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
                          unsigned long timeout)
{
        unsigned long started = jiffies;
        long ret;

        mutex_lock(&monc->mutex);
        while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) {
                mutex_unlock(&monc->mutex);

                if (timeout && time_after_eq(jiffies, started + timeout))
                        return -ETIMEDOUT;

                ret = wait_event_interruptible_timeout(monc->client->auth_wq,
                                monc->subs[CEPH_SUB_OSDMAP].have >= epoch,
                                ceph_timeout_jiffies(timeout));
                if (ret < 0)
                        return ret;

                mutex_lock(&monc->mutex);
        }

        mutex_unlock(&monc->mutex);
        return 0;
}
EXPORT_SYMBOL(ceph_monc_wait_osdmap);
/*
 * Open a session with a random monitor.  Request monmap and osdmap,
 * which are waited upon in __ceph_open_session().
 */
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
        mutex_lock(&monc->mutex);
        __ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
        __ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
        __open_session(monc);
        __schedule_delayed(monc);
        mutex_unlock(&monc->mutex);
        return 0;
}
EXPORT_SYMBOL(ceph_monc_open_session);
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
                                 struct ceph_msg *msg)
{
        struct ceph_client *client = monc->client;
        struct ceph_monmap *monmap = NULL, *old = monc->monmap;
        void *p, *end;

        mutex_lock(&monc->mutex);

        dout("handle_monmap\n");
        p = msg->front.iov_base;
        end = p + msg->front.iov_len;

        monmap = ceph_monmap_decode(p, end);
        if (IS_ERR(monmap)) {
                pr_err("problem decoding monmap, %d\n",
                       (int)PTR_ERR(monmap));
                ceph_msg_dump(msg);
                goto out;
        }

        if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
                kfree(monmap);
                goto out;
        }

        client->monc.monmap = monmap;
        kfree(old);

        __ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
        client->have_fsid = true;

out:
        mutex_unlock(&monc->mutex);
        wake_up_all(&client->auth_wq);
}
/*
 * generic requests (currently statfs, mon_get_version)
 */
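/*
 * Lifecycle sketch: alloc_generic_request() creates the request;
 * register_generic_request() takes a tree reference and assigns a tid;
 * send_generic_request() stamps the tid into the header and sends.  A
 * reply handler fills req->result, drops the tree reference via
 * __finish_generic_request(), and signals completion;
 * wait_generic_request() then returns req->result, cancelling the
 * request if interrupted.
 */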
DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node)

static void release_generic_request(struct kref *kref)
{
        struct ceph_mon_generic_request *req =
                container_of(kref, struct ceph_mon_generic_request, kref);

        dout("%s greq %p request %p reply %p\n", __func__, req, req->request,
             req->reply);
        WARN_ON(!RB_EMPTY_NODE(&req->node));

        if (req->reply)
                ceph_msg_put(req->reply);
        if (req->request)
                ceph_msg_put(req->request);

        kfree(req);
}

static void put_generic_request(struct ceph_mon_generic_request *req)
{
        if (req)
                kref_put(&req->kref, release_generic_request);
}

static void get_generic_request(struct ceph_mon_generic_request *req)
{
        kref_get(&req->kref);
}
static struct ceph_mon_generic_request *
alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp)
{
        struct ceph_mon_generic_request *req;

        req = kzalloc(sizeof(*req), gfp);
        if (!req)
                return NULL;

        req->monc = monc;
        kref_init(&req->kref);
        RB_CLEAR_NODE(&req->node);
        init_completion(&req->completion);

        dout("%s greq %p\n", __func__, req);
        return req;
}

static void register_generic_request(struct ceph_mon_generic_request *req)
{
        struct ceph_mon_client *monc = req->monc;

        WARN_ON(req->tid);

        get_generic_request(req);
        req->tid = ++monc->last_tid;
        insert_generic_request(&monc->generic_request_tree, req);
}

static void send_generic_request(struct ceph_mon_client *monc,
                                 struct ceph_mon_generic_request *req)
{
        WARN_ON(!req->tid);

        dout("%s greq %p tid %llu\n", __func__, req, req->tid);
        req->request->hdr.tid = cpu_to_le64(req->tid);
        ceph_con_send(&monc->con, ceph_msg_get(req->request));
}

static void __finish_generic_request(struct ceph_mon_generic_request *req)
{
        struct ceph_mon_client *monc = req->monc;

        dout("%s greq %p tid %llu\n", __func__, req, req->tid);
        erase_generic_request(&monc->generic_request_tree, req);

        ceph_msg_revoke(req->request);
        ceph_msg_revoke_incoming(req->reply);
}

static void finish_generic_request(struct ceph_mon_generic_request *req)
{
        __finish_generic_request(req);
        put_generic_request(req);
}

static void complete_generic_request(struct ceph_mon_generic_request *req)
{
        if (req->complete_cb)
                req->complete_cb(req);
        else
                complete_all(&req->completion);
        put_generic_request(req);
}

static void cancel_generic_request(struct ceph_mon_generic_request *req)
{
        struct ceph_mon_client *monc = req->monc;
        struct ceph_mon_generic_request *lookup_req;

        dout("%s greq %p tid %llu\n", __func__, req, req->tid);

        mutex_lock(&monc->mutex);
        lookup_req = lookup_generic_request(&monc->generic_request_tree,
                                            req->tid);
        if (lookup_req) {
                WARN_ON(lookup_req != req);
                finish_generic_request(req);
        }

        mutex_unlock(&monc->mutex);
}

static int wait_generic_request(struct ceph_mon_generic_request *req)
{
        int ret;

        dout("%s greq %p tid %llu\n", __func__, req, req->tid);
        ret = wait_for_completion_interruptible(&req->completion);
        if (ret)
                cancel_generic_request(req);
        else
                ret = req->result; /* completed */

        return ret;
}
static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
                                          struct ceph_msg_header *hdr,
                                          int *skip)
{
        struct ceph_mon_client *monc = con->private;
        struct ceph_mon_generic_request *req;
        u64 tid = le64_to_cpu(hdr->tid);
        struct ceph_msg *m;

        mutex_lock(&monc->mutex);
        req = lookup_generic_request(&monc->generic_request_tree, tid);
        if (!req) {
                dout("get_generic_reply %lld dne\n", tid);
                *skip = 1;
                m = NULL;
        } else {
                dout("get_generic_reply %lld got %p\n", tid, req->reply);
                *skip = 0;
                m = ceph_msg_get(req->reply);
                /*
                 * we don't need to track the connection reading into
                 * this reply because we only have one open connection
                 * at a time, ever.
                 */
        }
        mutex_unlock(&monc->mutex);
        return m;
}
/*
 * statfs
 */
static void handle_statfs_reply(struct ceph_mon_client *monc,
                                struct ceph_msg *msg)
{
        struct ceph_mon_generic_request *req;
        struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
        u64 tid = le64_to_cpu(msg->hdr.tid);

        dout("%s msg %p tid %llu\n", __func__, msg, tid);

        if (msg->front.iov_len != sizeof(*reply))
                goto bad;

        mutex_lock(&monc->mutex);
        req = lookup_generic_request(&monc->generic_request_tree, tid);
        if (!req) {
                mutex_unlock(&monc->mutex);
                return;
        }

        req->result = 0;
        *req->u.st = reply->st; /* struct */
        __finish_generic_request(req);
        mutex_unlock(&monc->mutex);

        complete_generic_request(req);
        return;

bad:
        pr_err("corrupt statfs reply, tid %llu\n", tid);
        ceph_msg_dump(msg);
}
/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool,
                        struct ceph_statfs *buf)
{
        struct ceph_mon_generic_request *req;
        struct ceph_mon_statfs *h;
        int ret = -ENOMEM;

        req = alloc_generic_request(monc, GFP_NOFS);
        if (!req)
                goto out;

        req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
                                    true);
        if (!req->request)
                goto out;

        req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true);
        if (!req->reply)
                goto out;

        req->u.st = buf;
        req->request->hdr.version = cpu_to_le16(2);

        mutex_lock(&monc->mutex);
        register_generic_request(req);
        /* fill out request */
        h = req->request->front.iov_base;
        h->monhdr.have_version = 0;
        h->monhdr.session_mon = cpu_to_le16(-1);
        h->monhdr.session_mon_tid = 0;
        h->fsid = monc->monmap->fsid;
        h->contains_data_pool = (data_pool != CEPH_NOPOOL);
        h->data_pool = cpu_to_le64(data_pool);
        send_generic_request(monc, req);
        mutex_unlock(&monc->mutex);

        ret = wait_generic_request(req);
out:
        put_generic_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);
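/*
 * Example caller (a sketch, not from this file): a filesystem statfs
 * handler can fill a struct ceph_statfs directly:
 *
 *      struct ceph_statfs st;
 *      int err = ceph_monc_do_statfs(&client->monc, CEPH_NOPOOL, &st);
 */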
static void handle_get_version_reply(struct ceph_mon_client *monc,
                                     struct ceph_msg *msg)
{
        struct ceph_mon_generic_request *req;
        u64 tid = le64_to_cpu(msg->hdr.tid);
        void *p = msg->front.iov_base;
        void *end = p + msg->front_alloc_len;
        u64 handle;

        dout("%s msg %p tid %llu\n", __func__, msg, tid);

        ceph_decode_need(&p, end, 2*sizeof(u64), bad);
        handle = ceph_decode_64(&p);
        if (tid != 0 && tid != handle)
                goto bad;

        mutex_lock(&monc->mutex);
        req = lookup_generic_request(&monc->generic_request_tree, handle);
        if (!req) {
                mutex_unlock(&monc->mutex);
                return;
        }

        req->result = 0;
        req->u.newest = ceph_decode_64(&p);
        __finish_generic_request(req);
        mutex_unlock(&monc->mutex);

        complete_generic_request(req);
        return;

bad:
        pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
        ceph_msg_dump(msg);
}
static struct ceph_mon_generic_request *
__ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
                        ceph_monc_callback_t cb, u64 private_data)
{
        struct ceph_mon_generic_request *req;

        req = alloc_generic_request(monc, GFP_NOIO);
        if (!req)
                goto err_put_req;

        req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
                                    sizeof(u64) + sizeof(u32) + strlen(what),
                                    GFP_NOIO, true);
        if (!req->request)
                goto err_put_req;

        req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO,
                                  true);
        if (!req->reply)
                goto err_put_req;

        req->complete_cb = cb;
        req->private_data = private_data;

        mutex_lock(&monc->mutex);
        register_generic_request(req);
        {
                void *p = req->request->front.iov_base;
                void *const end = p + req->request->front_alloc_len;

                ceph_encode_64(&p, req->tid); /* handle */
                ceph_encode_string(&p, end, what, strlen(what));
                WARN_ON(p != end);
        }
        send_generic_request(monc, req);
        mutex_unlock(&monc->mutex);

        return req;

err_put_req:
        put_generic_request(req);
        return ERR_PTR(-ENOMEM);
}
/*
 * Send MMonGetVersion and wait for the reply.
 *
 * @what: one of "mdsmap", "osdmap" or "monmap"
 */
int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
                          u64 *newest)
{
        struct ceph_mon_generic_request *req;
        int ret;

        req = __ceph_monc_get_version(monc, what, NULL, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        ret = wait_generic_request(req);
        if (!ret)
                *newest = req->u.newest;

        put_generic_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_monc_get_version);
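/*
 * Example (a sketch): ask the monitors for the newest osdmap epoch they
 * know about:
 *
 *      u64 newest;
 *      int err = ceph_monc_get_version(&client->monc, "osdmap", &newest);
 */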
/*
 * Send MMonGetVersion; the reply is delivered via the provided callback.
 *
 * @what: one of "mdsmap", "osdmap" or "monmap"
 */
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
                                ceph_monc_callback_t cb, u64 private_data)
{
        struct ceph_mon_generic_request *req;

        req = __ceph_monc_get_version(monc, what, cb, private_data);
        if (IS_ERR(req))
                return PTR_ERR(req);

        put_generic_request(req);
        return 0;
}
EXPORT_SYMBOL(ceph_monc_get_version_async);
static void handle_command_ack(struct ceph_mon_client *monc,
                               struct ceph_msg *msg)
{
        struct ceph_mon_generic_request *req;
        void *p = msg->front.iov_base;
        void *const end = p + msg->front_alloc_len;
        u64 tid = le64_to_cpu(msg->hdr.tid);

        dout("%s msg %p tid %llu\n", __func__, msg, tid);

        ceph_decode_need(&p, end, sizeof(struct ceph_mon_request_header) +
                                  sizeof(u32), bad);
        p += sizeof(struct ceph_mon_request_header);

        mutex_lock(&monc->mutex);
        req = lookup_generic_request(&monc->generic_request_tree, tid);
        if (!req) {
                mutex_unlock(&monc->mutex);
                return;
        }

        req->result = ceph_decode_32(&p);
        __finish_generic_request(req);
        mutex_unlock(&monc->mutex);

        complete_generic_request(req);
        return;

bad:
        pr_err("corrupt mon_command ack, tid %llu\n", tid);
        ceph_msg_dump(msg);
}
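/*
 * The command below is encoded as a single JSON string, e.g. (address
 * shown is illustrative):
 *
 *      { "prefix": "osd blacklist", "blacklistop": "add",
 *        "addr": "192.168.1.10:0/123456" }
 */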
int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
                            struct ceph_entity_addr *client_addr)
{
        struct ceph_mon_generic_request *req;
        struct ceph_mon_command *h;
        int ret = -ENOMEM;
        int len;

        req = alloc_generic_request(monc, GFP_NOIO);
        if (!req)
                goto out;

        req->request = ceph_msg_new(CEPH_MSG_MON_COMMAND, 256, GFP_NOIO, true);
        if (!req->request)
                goto out;

        req->reply = ceph_msg_new(CEPH_MSG_MON_COMMAND_ACK, 512, GFP_NOIO,
                                  true);
        if (!req->reply)
                goto out;

        mutex_lock(&monc->mutex);
        register_generic_request(req);
        h = req->request->front.iov_base;
        h->monhdr.have_version = 0;
        h->monhdr.session_mon = cpu_to_le16(-1);
        h->monhdr.session_mon_tid = 0;
        h->fsid = monc->monmap->fsid;
        h->num_strs = cpu_to_le32(1);
        len = sprintf(h->str, "{ \"prefix\": \"osd blacklist\", \
                \"blacklistop\": \"add\", \
                \"addr\": \"%pISpc/%u\" }",
                      &client_addr->in_addr, le32_to_cpu(client_addr->nonce));
        h->str_len = cpu_to_le32(len);
        send_generic_request(monc, req);
        mutex_unlock(&monc->mutex);

        ret = wait_generic_request(req);
        if (!ret)
                /*
                 * Make sure we have the osdmap that includes the blacklist
                 * entry.  This is needed to ensure that the OSDs pick up the
                 * new blacklist before processing any future requests from
                 * this client.
                 */
                ret = ceph_wait_for_latest_osdmap(monc->client, 0);

out:
        put_generic_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_monc_blacklist_add);
/*
 * Resend pending generic requests.
 */
static void __resend_generic_request(struct ceph_mon_client *monc)
{
        struct ceph_mon_generic_request *req;
        struct rb_node *p;

        for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
                req = rb_entry(p, struct ceph_mon_generic_request, node);
                ceph_msg_revoke(req->request);
                ceph_msg_revoke_incoming(req->reply);
                ceph_con_send(&monc->con, ceph_msg_get(req->request));
        }
}
/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
        struct ceph_mon_client *monc =
                container_of(work, struct ceph_mon_client, delayed_work.work);

        dout("monc delayed_work\n");
        mutex_lock(&monc->mutex);
        if (monc->hunting) {
                dout("%s continuing hunt\n", __func__);
                reopen_session(monc);
        } else {
                int is_auth = ceph_auth_is_authenticated(monc->auth);

                if (ceph_con_keepalive_expired(&monc->con,
                                               CEPH_MONC_PING_TIMEOUT)) {
                        dout("monc keepalive timeout\n");
                        is_auth = 0;
                        reopen_session(monc);
                }

                if (!monc->hunting) {
                        ceph_con_keepalive(&monc->con);
                        __validate_auth(monc);
                        un_backoff(monc);
                }

                if (is_auth &&
                    !(monc->con.peer_features & CEPH_FEATURE_MON_STATEFUL_SUB)) {
                        unsigned long now = jiffies;

                        dout("%s renew subs? now %lu renew after %lu\n",
                             __func__, now, monc->sub_renew_after);
                        if (time_after_eq(now, monc->sub_renew_after))
                                __send_subscribe(monc);
                }
        }
        __schedule_delayed(monc);
        mutex_unlock(&monc->mutex);
}
/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
        struct ceph_options *opt = monc->client->options;
        struct ceph_entity_addr *mon_addr = opt->mon_addr;
        int num_mon = opt->num_mon;
        int i;

        /* build initial monmap */
        monc->monmap = kzalloc(struct_size(monc->monmap, mon_inst, num_mon),
                               GFP_KERNEL);
        if (!monc->monmap)
                return -ENOMEM;
        for (i = 0; i < num_mon; i++) {
                monc->monmap->mon_inst[i].addr = mon_addr[i];
                monc->monmap->mon_inst[i].addr.nonce = 0;
                monc->monmap->mon_inst[i].name.type =
                        CEPH_ENTITY_TYPE_MON;
                monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
        }
        monc->monmap->num_mon = num_mon;
        return 0;
}
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
        int err = 0;

        dout("init\n");
        memset(monc, 0, sizeof(*monc));
        monc->client = cl;
        monc->monmap = NULL;
        mutex_init(&monc->mutex);

        err = build_initial_monmap(monc);
        if (err)
                goto out;

        /* connection */
        /* authentication */
        monc->auth = ceph_auth_init(cl->options->name,
                                    cl->options->key);
        if (IS_ERR(monc->auth)) {
                err = PTR_ERR(monc->auth);
                goto out_monmap;
        }
        monc->auth->want_keys =
                CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
                CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;

        /* msgs */
        err = -ENOMEM;
        monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
                                     sizeof(struct ceph_mon_subscribe_ack),
                                     GFP_KERNEL, true);
        if (!monc->m_subscribe_ack)
                goto out_auth;

        monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128,
                                         GFP_KERNEL, true);
        if (!monc->m_subscribe)
                goto out_subscribe_ack;

        monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096,
                                          GFP_KERNEL, true);
        if (!monc->m_auth_reply)
                goto out_subscribe;

        monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_KERNEL, true);
        monc->pending_auth = 0;
        if (!monc->m_auth)
                goto out_auth_reply;

        ceph_con_init(&monc->con, monc, &mon_con_ops,
                      &monc->client->msgr);

        monc->cur_mon = -1;
        monc->had_a_connection = false;
        monc->hunt_mult = 1;

        INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
        monc->generic_request_tree = RB_ROOT;
        monc->last_tid = 0;

        monc->fs_cluster_id = CEPH_FS_CLUSTER_ID_NONE;

        return 0;

out_auth_reply:
        ceph_msg_put(monc->m_auth_reply);
out_subscribe:
        ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
        ceph_msg_put(monc->m_subscribe_ack);
out_auth:
        ceph_auth_destroy(monc->auth);
out_monmap:
        kfree(monc->monmap);
out:
        return err;
}
EXPORT_SYMBOL(ceph_monc_init);
void ceph_monc_stop(struct ceph_mon_client *monc)
{
        dout("stop\n");
        cancel_delayed_work_sync(&monc->delayed_work);

        mutex_lock(&monc->mutex);
        __close_session(monc);
        monc->cur_mon = -1;
        mutex_unlock(&monc->mutex);

        /*
         * flush msgr queue before we destroy ourselves to ensure that:
         *  - any work that references our embedded con is finished.
         *  - any osd_client or other work that may reference an authorizer
         *    finishes before we shut down the auth subsystem.
         */
        ceph_msgr_flush();

        ceph_auth_destroy(monc->auth);

        WARN_ON(!RB_EMPTY_ROOT(&monc->generic_request_tree));

        ceph_msg_put(monc->m_auth);
        ceph_msg_put(monc->m_auth_reply);
        ceph_msg_put(monc->m_subscribe);
        ceph_msg_put(monc->m_subscribe_ack);

        kfree(monc->monmap);
}
EXPORT_SYMBOL(ceph_monc_stop);
static void finish_hunting(struct ceph_mon_client *monc)
{
        if (monc->hunting) {
                dout("%s found mon%d\n", __func__, monc->cur_mon);
                monc->hunting = false;
                monc->had_a_connection = true;
                un_backoff(monc);
                __schedule_delayed(monc);
        }
}
static void handle_auth_reply(struct ceph_mon_client *monc,
                              struct ceph_msg *msg)
{
        int ret;
        int was_auth = 0;

        mutex_lock(&monc->mutex);
        was_auth = ceph_auth_is_authenticated(monc->auth);
        monc->pending_auth = 0;
        ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
                                     msg->front.iov_len,
                                     monc->m_auth->front.iov_base,
                                     monc->m_auth->front_alloc_len);
        if (ret > 0) {
                __send_prepared_auth_request(monc, ret);
                goto out;
        }

        finish_hunting(monc);

        if (ret < 0) {
                monc->client->auth_err = ret;
        } else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
                dout("authenticated, starting session\n");

                monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
                monc->client->msgr.inst.name.num =
                                        cpu_to_le64(monc->auth->global_id);

                __send_subscribe(monc);
                __resend_generic_request(monc);

                pr_info("mon%d %s session established\n", monc->cur_mon,
                        ceph_pr_addr(&monc->con.peer_addr));
        }

out:
        mutex_unlock(&monc->mutex);
        if (monc->client->auth_err < 0)
                wake_up_all(&monc->client->auth_wq);
}
static int __validate_auth(struct ceph_mon_client *monc)
{
        int ret;

        if (monc->pending_auth)
                return 0;

        ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
                              monc->m_auth->front_alloc_len);
        if (ret <= 0)
                return ret; /* either an error, or no need to authenticate */
        __send_prepared_auth_request(monc, ret);
        return 0;
}

int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
        int ret;

        mutex_lock(&monc->mutex);
        ret = __validate_auth(monc);
        mutex_unlock(&monc->mutex);
        return ret;
}
EXPORT_SYMBOL(ceph_monc_validate_auth);
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
        struct ceph_mon_client *monc = con->private;
        int type = le16_to_cpu(msg->hdr.type);

        if (!monc)
                return;

        switch (type) {
        case CEPH_MSG_AUTH_REPLY:
                handle_auth_reply(monc, msg);
                break;

        case CEPH_MSG_MON_SUBSCRIBE_ACK:
                handle_subscribe_ack(monc, msg);
                break;

        case CEPH_MSG_STATFS_REPLY:
                handle_statfs_reply(monc, msg);
                break;

        case CEPH_MSG_MON_GET_VERSION_REPLY:
                handle_get_version_reply(monc, msg);
                break;

        case CEPH_MSG_MON_COMMAND_ACK:
                handle_command_ack(monc, msg);
                break;

        case CEPH_MSG_MON_MAP:
                ceph_monc_handle_map(monc, msg);
                break;

        case CEPH_MSG_OSD_MAP:
                ceph_osdc_handle_map(&monc->client->osdc, msg);
                break;

        default:
                /* can the chained handler handle it? */
                if (monc->client->extra_mon_dispatch &&
                    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
                        break;

                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
        ceph_msg_put(msg);
}
/*
 * Allocate memory for incoming message
 */
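/*
 * Contract with the messenger (inferred from the code below): return a
 * ceph_msg to receive the payload into; return NULL with *skip = 1 to
 * have the message dropped; return NULL with *skip = 0 to report
 * ENOMEM.
 */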
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
                                      struct ceph_msg_header *hdr,
                                      int *skip)
{
        struct ceph_mon_client *monc = con->private;
        int type = le16_to_cpu(hdr->type);
        int front_len = le32_to_cpu(hdr->front_len);
        struct ceph_msg *m = NULL;

        *skip = 0;

        switch (type) {
        case CEPH_MSG_MON_SUBSCRIBE_ACK:
                m = ceph_msg_get(monc->m_subscribe_ack);
                break;
        case CEPH_MSG_STATFS_REPLY:
        case CEPH_MSG_MON_COMMAND_ACK:
                return get_generic_reply(con, hdr, skip);
        case CEPH_MSG_AUTH_REPLY:
                m = ceph_msg_get(monc->m_auth_reply);
                break;
        case CEPH_MSG_MON_GET_VERSION_REPLY:
                if (le64_to_cpu(hdr->tid) != 0)
                        return get_generic_reply(con, hdr, skip);

                /*
                 * Older OSDs don't set reply tid even if the original
                 * request had a non-zero tid.  Work around this weirdness
                 * by allocating a new message.
                 */
                /* fall through */
        case CEPH_MSG_MON_MAP:
        case CEPH_MSG_MDS_MAP:
        case CEPH_MSG_OSD_MAP:
        case CEPH_MSG_FS_MAP_USER:
                m = ceph_msg_new(type, front_len, GFP_NOFS, false);
                if (!m)
                        return NULL; /* ENOMEM--return skip == 0 */
                break;
        }

        if (!m) {
                pr_info("alloc_msg unknown type %d\n", type);
                *skip = 1;
        } else if (front_len > m->front_alloc_len) {
                pr_warn("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
                        front_len, m->front_alloc_len,
                        (unsigned int)con->peer_name.type,
                        le64_to_cpu(con->peer_name.num));
                ceph_msg_put(m);
                m = ceph_msg_new(type, front_len, GFP_NOFS, false);
        }

        return m;
}
/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
        struct ceph_mon_client *monc = con->private;

        mutex_lock(&monc->mutex);
        dout("%s mon%d\n", __func__, monc->cur_mon);
        if (monc->cur_mon >= 0) {
                if (!monc->hunting) {
                        dout("%s hunting for new mon\n", __func__);
                        reopen_session(monc);
                        __schedule_delayed(monc);
                } else {
                        dout("%s already hunting\n", __func__);
                }
        }
        mutex_unlock(&monc->mutex);
}
/*
 * We can ignore refcounting on the connection struct, as all references
 * will come from the messenger workqueue, which is drained prior to
 * mon_client destruction.
 */
static struct ceph_connection *con_get(struct ceph_connection *con)
{
        return con;
}

static void con_put(struct ceph_connection *con)
{
}

static const struct ceph_connection_operations mon_con_ops = {
        .get = con_get,
        .put = con_put,
        .dispatch = dispatch,
        .fault = mon_fault,
        .alloc_msg = mon_alloc_msg,
};