Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
3d14c5d2 | 2 | #include <linux/ceph/ceph_debug.h> |
2f2dc053 | 3 | |
496e5955 | 4 | #include <linux/fs.h> |
2f2dc053 | 5 | #include <linux/wait.h> |
5a0e3ad6 | 6 | #include <linux/slab.h> |
54008399 | 7 | #include <linux/gfp.h> |
2f2dc053 | 8 | #include <linux/sched.h> |
3d14c5d2 YS |
9 | #include <linux/debugfs.h> |
10 | #include <linux/seq_file.h> | |
3e0708b9 | 11 | #include <linux/ratelimit.h> |
2f2dc053 | 12 | |
2f2dc053 | 13 | #include "super.h" |
3d14c5d2 YS |
14 | #include "mds_client.h" |
15 | ||
1fe60e51 | 16 | #include <linux/ceph/ceph_features.h> |
3d14c5d2 YS |
17 | #include <linux/ceph/messenger.h> |
18 | #include <linux/ceph/decode.h> | |
19 | #include <linux/ceph/pagelist.h> | |
20 | #include <linux/ceph/auth.h> | |
21 | #include <linux/ceph/debugfs.h> | |
2f2dc053 SW |
22 | |
23 | /* | |
24 | * A cluster of MDS (metadata server) daemons is responsible for | |
25 | * managing the file system namespace (the directory hierarchy and | |
26 | * inodes) and for coordinating shared access to storage. Metadata is | |
27 | * partitioned hierarchically across a number of servers, and that |
28 | * partition varies over time as the cluster adjusts the distribution | |
29 | * in order to balance load. | |
30 | * | |
31 | * The MDS client is primarily responsible for managing synchronous |
32 | * metadata requests for operations like open, unlink, and so forth. | |
33 | * If there is an MDS failure, we find out about it when we (possibly |
34 | * request and) receive a new MDS map, and can resubmit affected | |
35 | * requests. | |
36 | * | |
37 | * For the most part, though, we take advantage of a lossless | |
38 | * communications channel to the MDS, and do not need to worry about | |
39 | * timing out or resubmitting requests. | |
40 | * | |
41 | * We maintain a stateful "session" with each MDS we interact with. | |
42 | * Within each session, we send periodic heartbeat messages to ensure |
43 | * any capabilities or leases we have been issued remain valid. If |
44 | * the session times out and goes stale, our leases and capabilities | |
45 | * are no longer valid. | |
46 | */ | |
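The session lifecycle sketched in this comment maps onto the `CEPH_MDS_SESSION_*` states that `ceph_session_state_name()` enumerates further down in this file. As a reading aid, a condensed view of those states; the transitions are inferred from the surrounding code, not authoritative:

```c
/*
 * Reading aid (inferred, not from the source): the session states
 * handled by ceph_session_state_name() below, in rough lifecycle order.
 *
 *   NEW -> OPENING -> OPEN <-> HUNG             (heartbeat/renewal missed)
 *   OPEN -> RESTARTING -> RECONNECTING -> OPEN  (MDS failover path)
 *   OPEN -> CLOSING                             (client-initiated close)
 *   any  -> REJECTED                            (MDS refused the session)
 */
```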
47 | ||
20cb34ae | 48 | struct ceph_reconnect_state { |
44c99757 | 49 | int nr_caps; |
20cb34ae | 50 | struct ceph_pagelist *pagelist; |
121f22a1 | 51 | unsigned msg_version; |
20cb34ae SW |
52 | }; |
53 | ||
2f2dc053 SW |
54 | static void __wake_requests(struct ceph_mds_client *mdsc, |
55 | struct list_head *head); | |
56 | ||
9e32789f | 57 | static const struct ceph_connection_operations mds_con_ops; |
2f2dc053 SW |
58 | |
59 | ||
60 | /* | |
61 | * mds reply parsing | |
62 | */ | |
63 | ||
64 | /* | |
65 | * parse individual inode info | |
66 | */ | |
67 | static int parse_reply_info_in(void **p, void *end, | |
14303d20 | 68 | struct ceph_mds_reply_info_in *info, |
12b4629a | 69 | u64 features) |
2f2dc053 SW |
70 | { |
71 | int err = -EIO; | |
72 | ||
73 | info->in = *p; | |
74 | *p += sizeof(struct ceph_mds_reply_inode) + | |
75 | sizeof(*info->in->fragtree.splits) * | |
76 | le32_to_cpu(info->in->fragtree.nsplits); | |
77 | ||
78 | ceph_decode_32_safe(p, end, info->symlink_len, bad); | |
79 | ceph_decode_need(p, end, info->symlink_len, bad); | |
80 | info->symlink = *p; | |
81 | *p += info->symlink_len; | |
82 | ||
14303d20 SW |
83 | if (features & CEPH_FEATURE_DIRLAYOUTHASH) |
84 | ceph_decode_copy_safe(p, end, &info->dir_layout, | |
85 | sizeof(info->dir_layout), bad); | |
86 | else | |
87 | memset(&info->dir_layout, 0, sizeof(info->dir_layout)); | |
88 | ||
2f2dc053 SW |
89 | ceph_decode_32_safe(p, end, info->xattr_len, bad); |
90 | ceph_decode_need(p, end, info->xattr_len, bad); | |
91 | info->xattr_data = *p; | |
92 | *p += info->xattr_len; | |
fb01d1f8 YZ |
93 | |
94 | if (features & CEPH_FEATURE_MDS_INLINE_DATA) { | |
95 | ceph_decode_64_safe(p, end, info->inline_version, bad); | |
96 | ceph_decode_32_safe(p, end, info->inline_len, bad); | |
97 | ceph_decode_need(p, end, info->inline_len, bad); | |
98 | info->inline_data = *p; | |
99 | *p += info->inline_len; | |
100 | } else | |
101 | info->inline_version = CEPH_INLINE_NONE; | |
102 | ||
fb18a575 LH |
103 | if (features & CEPH_FEATURE_MDS_QUOTA) { |
104 | u8 struct_v, struct_compat; | |
105 | u32 struct_len; | |
106 | ||
107 | /* | |
108 | * both struct_v and struct_compat are expected to be >= 1 | |
109 | */ | |
110 | ceph_decode_8_safe(p, end, struct_v, bad); | |
111 | ceph_decode_8_safe(p, end, struct_compat, bad); | |
112 | if (!struct_v || !struct_compat) | |
113 | goto bad; | |
114 | ceph_decode_32_safe(p, end, struct_len, bad); | |
115 | ceph_decode_need(p, end, struct_len, bad); | |
116 | ceph_decode_64_safe(p, end, info->max_bytes, bad); | |
117 | ceph_decode_64_safe(p, end, info->max_files, bad); | |
118 | } else { | |
119 | info->max_bytes = 0; | |
120 | info->max_files = 0; | |
121 | } | |
122 | ||
779fe0fb YZ |
123 | info->pool_ns_len = 0; |
124 | info->pool_ns_data = NULL; | |
5ea5c5e0 YZ |
125 | if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) { |
126 | ceph_decode_32_safe(p, end, info->pool_ns_len, bad); | |
779fe0fb YZ |
127 | if (info->pool_ns_len > 0) { |
128 | ceph_decode_need(p, end, info->pool_ns_len, bad); | |
129 | info->pool_ns_data = *p; | |
130 | *p += info->pool_ns_len; | |
131 | } | |
5ea5c5e0 YZ |
132 | } |
133 | ||
2f2dc053 SW |
134 | return 0; |
135 | bad: | |
136 | return err; | |
137 | } | |
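parse_reply_info_in() leans entirely on the `ceph_decode_*_safe()` helpers, which verify that enough bytes remain in the buffer before consuming them and jump to the `bad` label on a short read. A minimal user-space sketch of that idiom (hypothetical names, not the kernel macros):

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch of the bounds-checked decode behind ceph_decode_32_safe():
 * refuse to read past `end`, copy the value with memcpy (wire data
 * may be unaligned), then advance the cursor. */
static int decode_u32(void **p, void *end, uint32_t *out)
{
	if ((char *)end - (char *)*p < (ptrdiff_t)sizeof(*out))
		return -1;                  /* short buffer: caller bails to "bad" */
	memcpy(out, *p, sizeof(*out));
	*p = (char *)*p + sizeof(*out);
	return 0;                           /* caller still byte-swaps (le32_to_cpu) */
}
```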
138 | ||
139 | /* | |
140 | * parse a normal reply, which may contain a (dir+)dentry and/or a | |
141 | * target inode. | |
142 | */ | |
143 | static int parse_reply_info_trace(void **p, void *end, | |
14303d20 | 144 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 145 | u64 features) |
2f2dc053 SW |
146 | { |
147 | int err; | |
148 | ||
149 | if (info->head->is_dentry) { | |
14303d20 | 150 | err = parse_reply_info_in(p, end, &info->diri, features); |
2f2dc053 SW |
151 | if (err < 0) |
152 | goto out_bad; | |
153 | ||
154 | if (unlikely(*p + sizeof(*info->dirfrag) > end)) | |
155 | goto bad; | |
156 | info->dirfrag = *p; | |
157 | *p += sizeof(*info->dirfrag) + | |
158 | sizeof(u32)*le32_to_cpu(info->dirfrag->ndist); | |
159 | if (unlikely(*p > end)) | |
160 | goto bad; | |
161 | ||
162 | ceph_decode_32_safe(p, end, info->dname_len, bad); | |
163 | ceph_decode_need(p, end, info->dname_len, bad); | |
164 | info->dname = *p; | |
165 | *p += info->dname_len; | |
166 | info->dlease = *p; | |
167 | *p += sizeof(*info->dlease); | |
168 | } | |
169 | ||
170 | if (info->head->is_target) { | |
14303d20 | 171 | err = parse_reply_info_in(p, end, &info->targeti, features); |
2f2dc053 SW |
172 | if (err < 0) |
173 | goto out_bad; | |
174 | } | |
175 | ||
176 | if (unlikely(*p != end)) | |
177 | goto bad; | |
178 | return 0; | |
179 | ||
180 | bad: | |
181 | err = -EIO; | |
182 | out_bad: | |
183 | pr_err("problem parsing mds trace %d\n", err); | |
184 | return err; | |
185 | } | |
186 | ||
187 | /* | |
188 | * parse readdir results | |
189 | */ | |
190 | static int parse_reply_info_dir(void **p, void *end, | |
14303d20 | 191 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 192 | u64 features) |
2f2dc053 SW |
193 | { |
194 | u32 num, i = 0; | |
195 | int err; | |
196 | ||
197 | info->dir_dir = *p; | |
198 | if (*p + sizeof(*info->dir_dir) > end) | |
199 | goto bad; | |
200 | *p += sizeof(*info->dir_dir) + | |
201 | sizeof(u32)*le32_to_cpu(info->dir_dir->ndist); | |
202 | if (*p > end) | |
203 | goto bad; | |
204 | ||
205 | ceph_decode_need(p, end, sizeof(num) + 2, bad); | |
c89136ea | 206 | num = ceph_decode_32(p); |
956d39d6 YZ |
207 | { |
208 | u16 flags = ceph_decode_16(p); | |
209 | info->dir_end = !!(flags & CEPH_READDIR_FRAG_END); | |
210 | info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE); | |
f3c4ebe6 | 211 | info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER); |
79162547 | 212 | info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH); |
956d39d6 | 213 | } |
2f2dc053 SW |
214 | if (num == 0) |
215 | goto done; | |
216 | ||
2a5beea3 YZ |
217 | BUG_ON(!info->dir_entries); |
218 | if ((unsigned long)(info->dir_entries + num) > | |
219 | (unsigned long)info->dir_entries + info->dir_buf_size) { | |
54008399 YZ |
220 | pr_err("dir contents are larger than expected\n"); |
221 | WARN_ON(1); | |
222 | goto bad; | |
223 | } | |
2f2dc053 | 224 | |
54008399 | 225 | info->dir_nr = num; |
2f2dc053 | 226 | while (num) { |
2a5beea3 | 227 | struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i; |
2f2dc053 SW |
228 | /* dentry */ |
229 | ceph_decode_need(p, end, sizeof(u32)*2, bad); | |
2a5beea3 YZ |
230 | rde->name_len = ceph_decode_32(p); |
231 | ceph_decode_need(p, end, rde->name_len, bad); | |
232 | rde->name = *p; | |
233 | *p += rde->name_len; | |
234 | dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name); | |
235 | rde->lease = *p; | |
2f2dc053 SW |
236 | *p += sizeof(struct ceph_mds_reply_lease); |
237 | ||
238 | /* inode */ | |
2a5beea3 | 239 | err = parse_reply_info_in(p, end, &rde->inode, features); |
2f2dc053 SW |
240 | if (err < 0) |
241 | goto out_bad; | |
8974eebd YZ |
242 | /* ceph_readdir_prepopulate() will update it */ |
243 | rde->offset = 0; | |
2f2dc053 SW |
244 | i++; |
245 | num--; | |
246 | } | |
247 | ||
248 | done: | |
249 | if (*p != end) | |
250 | goto bad; | |
251 | return 0; | |
252 | ||
253 | bad: | |
254 | err = -EIO; | |
255 | out_bad: | |
256 | pr_err("problem parsing dir contents %d\n", err); | |
257 | return err; | |
258 | } | |
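The readdir parser above packs several booleans into one 16-bit flag word and normalizes each bit with `!!(flags & BIT)`. A small illustration of that unpacking; the flag values here are placeholders, not the real `CEPH_READDIR_*` constants from the shared ceph headers:

```c
#include <stdint.h>

#define FRAG_END      0x0001   /* placeholder value */
#define FRAG_COMPLETE 0x0002   /* placeholder value */

struct dir_flags {
	unsigned end:1;        /* this fragment is the last one */
	unsigned complete:1;   /* the whole fragment was returned */
};

/* !! collapses any set bit down to exactly 0 or 1, which matters
 * when the result lands in a one-bit field as above. */
static struct dir_flags unpack_dir_flags(uint16_t flags)
{
	struct dir_flags f = {
		.end      = !!(flags & FRAG_END),
		.complete = !!(flags & FRAG_COMPLETE),
	};
	return f;
}
```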
259 | ||
25933abd HS |
260 | /* |
261 | * parse fcntl F_GETLK results | |
262 | */ | |
263 | static int parse_reply_info_filelock(void **p, void *end, | |
14303d20 | 264 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 265 | u64 features) |
25933abd HS |
266 | { |
267 | if (*p + sizeof(*info->filelock_reply) > end) | |
268 | goto bad; | |
269 | ||
270 | info->filelock_reply = *p; | |
271 | *p += sizeof(*info->filelock_reply); | |
272 | ||
273 | if (unlikely(*p != end)) | |
274 | goto bad; | |
275 | return 0; | |
276 | ||
277 | bad: | |
278 | return -EIO; | |
279 | } | |
280 | ||
6e8575fa SL |
281 | /* |
282 | * parse create results | |
283 | */ | |
284 | static int parse_reply_info_create(void **p, void *end, | |
285 | struct ceph_mds_reply_info_parsed *info, | |
12b4629a | 286 | u64 features) |
6e8575fa SL |
287 | { |
288 | if (features & CEPH_FEATURE_REPLY_CREATE_INODE) { | |
289 | if (*p == end) { | |
290 | info->has_create_ino = false; | |
291 | } else { | |
292 | info->has_create_ino = true; | |
293 | info->ino = ceph_decode_64(p); | |
294 | } | |
295 | } | |
296 | ||
297 | if (unlikely(*p != end)) | |
298 | goto bad; | |
299 | return 0; | |
300 | ||
301 | bad: | |
302 | return -EIO; | |
303 | } | |
304 | ||
25933abd HS |
305 | /* |
306 | * parse extra results | |
307 | */ | |
308 | static int parse_reply_info_extra(void **p, void *end, | |
14303d20 | 309 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 310 | u64 features) |
25933abd | 311 | { |
6df8c9d8 JL |
312 | u32 op = le32_to_cpu(info->head->op); |
313 | ||
314 | if (op == CEPH_MDS_OP_GETFILELOCK) | |
14303d20 | 315 | return parse_reply_info_filelock(p, end, info, features); |
6df8c9d8 | 316 | else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP) |
14303d20 | 317 | return parse_reply_info_dir(p, end, info, features); |
6df8c9d8 | 318 | else if (op == CEPH_MDS_OP_CREATE) |
6e8575fa SL |
319 | return parse_reply_info_create(p, end, info, features); |
320 | else | |
321 | return -EIO; | |
25933abd HS |
322 | } |
323 | ||
2f2dc053 SW |
324 | /* |
325 | * parse entire mds reply | |
326 | */ | |
327 | static int parse_reply_info(struct ceph_msg *msg, | |
14303d20 | 328 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 329 | u64 features) |
2f2dc053 SW |
330 | { |
331 | void *p, *end; | |
332 | u32 len; | |
333 | int err; | |
334 | ||
335 | info->head = msg->front.iov_base; | |
336 | p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); | |
337 | end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); | |
338 | ||
339 | /* trace */ | |
340 | ceph_decode_32_safe(&p, end, len, bad); | |
341 | if (len > 0) { | |
32852a81 | 342 | ceph_decode_need(&p, end, len, bad); |
14303d20 | 343 | err = parse_reply_info_trace(&p, p+len, info, features); |
2f2dc053 SW |
344 | if (err < 0) |
345 | goto out_bad; | |
346 | } | |
347 | ||
25933abd | 348 | /* extra */ |
2f2dc053 SW |
349 | ceph_decode_32_safe(&p, end, len, bad); |
350 | if (len > 0) { | |
32852a81 | 351 | ceph_decode_need(&p, end, len, bad); |
14303d20 | 352 | err = parse_reply_info_extra(&p, p+len, info, features); |
2f2dc053 SW |
353 | if (err < 0) |
354 | goto out_bad; | |
355 | } | |
356 | ||
357 | /* snap blob */ | |
358 | ceph_decode_32_safe(&p, end, len, bad); | |
359 | info->snapblob_len = len; | |
360 | info->snapblob = p; | |
361 | p += len; | |
362 | ||
363 | if (p != end) | |
364 | goto bad; | |
365 | return 0; | |
366 | ||
367 | bad: | |
368 | err = -EIO; | |
369 | out_bad: | |
370 | pr_err("mds parse_reply err %d\n", err); | |
371 | return err; | |
372 | } | |
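parse_reply_info() also shows the overall reply framing: after the fixed head come three length-prefixed blobs (trace, extra, snap), and the walk must land exactly on `end`. A compact user-space sketch of that framing check, reusing the hypothetical `decode_u32()` from the earlier sketch:

```c
#include <stddef.h>
#include <stdint.h>

/* Sketch: skip over `nsections` length-prefixed blobs and insist the
 * buffer is consumed exactly, mirroring the final p != end check in
 * parse_reply_info(). decode_u32() is the earlier hypothetical helper. */
static int walk_sections(void *p, void *end, int nsections)
{
	for (int i = 0; i < nsections; i++) {
		uint32_t len;
		if (decode_u32(&p, end, &len))
			return -1;                        /* truncated length field */
		if ((char *)end - (char *)p < (ptrdiff_t)len)
			return -1;                        /* blob overruns the buffer */
		p = (char *)p + len;                      /* skip (or parse) the blob */
	}
	return (p == end) ? 0 : -1;                       /* trailing bytes are an error */
}
```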
373 | ||
374 | static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info) | |
375 | { | |
2a5beea3 | 376 | if (!info->dir_entries) |
54008399 | 377 | return; |
2a5beea3 | 378 | free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size)); |
2f2dc053 SW |
379 | } |
380 | ||
381 | ||
382 | /* | |
383 | * sessions | |
384 | */ | |
a687ecaf | 385 | const char *ceph_session_state_name(int s) |
2f2dc053 SW |
386 | { |
387 | switch (s) { | |
388 | case CEPH_MDS_SESSION_NEW: return "new"; | |
389 | case CEPH_MDS_SESSION_OPENING: return "opening"; | |
390 | case CEPH_MDS_SESSION_OPEN: return "open"; | |
391 | case CEPH_MDS_SESSION_HUNG: return "hung"; | |
392 | case CEPH_MDS_SESSION_CLOSING: return "closing"; | |
44ca18f2 | 393 | case CEPH_MDS_SESSION_RESTARTING: return "restarting"; |
2f2dc053 | 394 | case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; |
fcff415c | 395 | case CEPH_MDS_SESSION_REJECTED: return "rejected"; |
2f2dc053 SW |
396 | default: return "???"; |
397 | } | |
398 | } | |
399 | ||
400 | static struct ceph_mds_session *get_session(struct ceph_mds_session *s) | |
401 | { | |
3997c01d | 402 | if (refcount_inc_not_zero(&s->s_ref)) { |
2f2dc053 | 403 | dout("mdsc get_session %p %d -> %d\n", s, |
3997c01d | 404 | refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref)); |
2f2dc053 SW |
405 | return s; |
406 | } else { | |
4c069a58 | 407 | dout("mdsc get_session %p 0 -- FAIL\n", s); |
2f2dc053 SW |
408 | return NULL; |
409 | } | |
410 | } | |
411 | ||
412 | void ceph_put_mds_session(struct ceph_mds_session *s) | |
413 | { | |
414 | dout("mdsc put_session %p %d -> %d\n", s, | |
3997c01d ER |
415 | refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1); |
416 | if (refcount_dec_and_test(&s->s_ref)) { | |
6c4a1915 | 417 | if (s->s_auth.authorizer) |
6c1ea260 | 418 | ceph_auth_destroy_authorizer(s->s_auth.authorizer); |
2f2dc053 | 419 | kfree(s); |
4e7a5dcd | 420 | } |
2f2dc053 SW |
421 | } |
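get_session() and ceph_put_mds_session() are the two halves of the standard `refcount_inc_not_zero()` idiom: a lookup may take a new reference only while the count is still nonzero, so it can never resurrect a session that the final put is already freeing. A stripped-down sketch of the pairing, with a hypothetical `obj` type:

```c
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref;
	/* ... payload ... */
};

/* Lookup side: succeeds only while at least one reference is live. */
static struct obj *obj_get(struct obj *o)
{
	return refcount_inc_not_zero(&o->ref) ? o : NULL;
}

/* Release side: the thread that drops the count to zero frees. */
static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		kfree(o);
}
```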
422 | ||
423 | /* | |
424 | * called under mdsc->mutex | |
425 | */ | |
426 | struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, | |
427 | int mds) | |
428 | { | |
429 | struct ceph_mds_session *session; | |
430 | ||
d37b1d99 | 431 | if (mds >= mdsc->max_sessions || !mdsc->sessions[mds]) |
2f2dc053 SW |
432 | return NULL; |
433 | session = mdsc->sessions[mds]; | |
434 | dout("lookup_mds_session %p %d\n", session, | |
3997c01d | 435 | refcount_read(&session->s_ref)); |
2f2dc053 SW |
436 | get_session(session); |
437 | return session; | |
438 | } | |
439 | ||
440 | static bool __have_session(struct ceph_mds_client *mdsc, int mds) | |
441 | { | |
98cfda81 | 442 | if (mds >= mdsc->max_sessions || !mdsc->sessions[mds]) |
2f2dc053 | 443 | return false; |
98cfda81 CX |
444 | else |
445 | return true; | |
2f2dc053 SW |
446 | } |
447 | ||
2600d2dd SW |
448 | static int __verify_registered_session(struct ceph_mds_client *mdsc, |
449 | struct ceph_mds_session *s) | |
450 | { | |
451 | if (s->s_mds >= mdsc->max_sessions || | |
452 | mdsc->sessions[s->s_mds] != s) | |
453 | return -ENOENT; | |
454 | return 0; | |
455 | } | |
456 | ||
2f2dc053 SW |
457 | /* |
458 | * create+register a new session for given mds. | |
459 | * called under mdsc->mutex. | |
460 | */ | |
461 | static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, | |
462 | int mds) | |
463 | { | |
464 | struct ceph_mds_session *s; | |
465 | ||
76201b63 | 466 | if (mds >= mdsc->mdsmap->m_num_mds) |
c338c07c NY |
467 | return ERR_PTR(-EINVAL); |
468 | ||
2f2dc053 | 469 | s = kzalloc(sizeof(*s), GFP_NOFS); |
4736b009 DC |
470 | if (!s) |
471 | return ERR_PTR(-ENOMEM); | |
47474d0b CX |
472 | |
473 | if (mds >= mdsc->max_sessions) { | |
474 | int newmax = 1 << get_count_order(mds + 1); | |
475 | struct ceph_mds_session **sa; | |
476 | ||
477 | dout("%s: realloc to %d\n", __func__, newmax); | |
478 | sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); | |
479 | if (!sa) | |
480 | goto fail_realloc; | |
481 | if (mdsc->sessions) { | |
482 | memcpy(sa, mdsc->sessions, | |
483 | mdsc->max_sessions * sizeof(void *)); | |
484 | kfree(mdsc->sessions); | |
485 | } | |
486 | mdsc->sessions = sa; | |
487 | mdsc->max_sessions = newmax; | |
488 | } | |
489 | ||
490 | dout("%s: mds%d\n", __func__, mds); | |
2f2dc053 SW |
491 | s->s_mdsc = mdsc; |
492 | s->s_mds = mds; | |
493 | s->s_state = CEPH_MDS_SESSION_NEW; | |
494 | s->s_ttl = 0; | |
495 | s->s_seq = 0; | |
496 | mutex_init(&s->s_mutex); | |
497 | ||
b7a9e5dd | 498 | ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); |
2f2dc053 | 499 | |
d8fb02ab | 500 | spin_lock_init(&s->s_gen_ttl_lock); |
2f2dc053 | 501 | s->s_cap_gen = 0; |
1ce208a6 | 502 | s->s_cap_ttl = jiffies - 1; |
d8fb02ab AE |
503 | |
504 | spin_lock_init(&s->s_cap_lock); | |
2f2dc053 SW |
505 | s->s_renew_requested = 0; |
506 | s->s_renew_seq = 0; | |
507 | INIT_LIST_HEAD(&s->s_caps); | |
508 | s->s_nr_caps = 0; | |
5dacf091 | 509 | s->s_trim_caps = 0; |
3997c01d | 510 | refcount_set(&s->s_ref, 1); |
2f2dc053 SW |
511 | INIT_LIST_HEAD(&s->s_waiting); |
512 | INIT_LIST_HEAD(&s->s_unsafe); | |
513 | s->s_num_cap_releases = 0; | |
99a9c273 | 514 | s->s_cap_reconnect = 0; |
7c1332b8 | 515 | s->s_cap_iterator = NULL; |
2f2dc053 | 516 | INIT_LIST_HEAD(&s->s_cap_releases); |
2f2dc053 | 517 | INIT_LIST_HEAD(&s->s_cap_flushing); |
2f2dc053 | 518 | |
2f2dc053 | 519 | mdsc->sessions[mds] = s; |
86d8f67b | 520 | atomic_inc(&mdsc->num_sessions); |
3997c01d | 521 | refcount_inc(&s->s_ref); /* one ref to sessions[], one to caller */ |
42ce56e5 | 522 | |
b7a9e5dd SW |
523 | ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, |
524 | ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); | |
42ce56e5 | 525 | |
2f2dc053 | 526 | return s; |
42ce56e5 SW |
527 | |
528 | fail_realloc: | |
529 | kfree(s); | |
530 | return ERR_PTR(-ENOMEM); | |
2f2dc053 SW |
531 | } |
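The reallocation in register_session() rounds the needed array size up to a power of two via `1 << get_count_order(mds + 1)`, so the number of reallocations grows only logarithmically with the highest mds rank. A few worked values (get_count_order(n) is ceil(log2(n))):

```c
/*
 * Worked examples of the growth rule in register_session():
 *
 *   mds = 0  ->  1 << get_count_order(1) = 1 << 0 = 1
 *   mds = 1  ->  1 << get_count_order(2) = 1 << 1 = 2
 *   mds = 4  ->  1 << get_count_order(5) = 1 << 3 = 8
 *   mds = 8  ->  1 << get_count_order(9) = 1 << 4 = 16
 *
 * Doubling keeps the total copy work across all reallocations O(n).
 */
```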
532 | ||
533 | /* | |
534 | * called under mdsc->mutex | |
535 | */ | |
2600d2dd | 536 | static void __unregister_session(struct ceph_mds_client *mdsc, |
42ce56e5 | 537 | struct ceph_mds_session *s) |
2f2dc053 | 538 | { |
2600d2dd SW |
539 | dout("__unregister_session mds%d %p\n", s->s_mds, s); |
540 | BUG_ON(mdsc->sessions[s->s_mds] != s); | |
42ce56e5 SW |
541 | mdsc->sessions[s->s_mds] = NULL; |
542 | ceph_con_close(&s->s_con); | |
543 | ceph_put_mds_session(s); | |
86d8f67b | 544 | atomic_dec(&mdsc->num_sessions); |
2f2dc053 SW |
545 | } |
546 | ||
547 | /* | |
548 | * drop session refs in request. | |
549 | * | |
550 | * should be last request ref, or hold mdsc->mutex | |
551 | */ | |
552 | static void put_request_session(struct ceph_mds_request *req) | |
553 | { | |
554 | if (req->r_session) { | |
555 | ceph_put_mds_session(req->r_session); | |
556 | req->r_session = NULL; | |
557 | } | |
558 | } | |
559 | ||
153c8e6b | 560 | void ceph_mdsc_release_request(struct kref *kref) |
2f2dc053 | 561 | { |
153c8e6b SW |
562 | struct ceph_mds_request *req = container_of(kref, |
563 | struct ceph_mds_request, | |
564 | r_kref); | |
54008399 | 565 | destroy_reply_info(&req->r_reply_info); |
153c8e6b SW |
566 | if (req->r_request) |
567 | ceph_msg_put(req->r_request); | |
54008399 | 568 | if (req->r_reply) |
153c8e6b | 569 | ceph_msg_put(req->r_reply); |
153c8e6b | 570 | if (req->r_inode) { |
41b02e1f | 571 | ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); |
153c8e6b SW |
572 | iput(req->r_inode); |
573 | } | |
3dd69aab JL |
574 | if (req->r_parent) |
575 | ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); | |
e96a650a | 576 | iput(req->r_target_inode); |
153c8e6b SW |
577 | if (req->r_dentry) |
578 | dput(req->r_dentry); | |
844d87c3 SW |
579 | if (req->r_old_dentry) |
580 | dput(req->r_old_dentry); | |
581 | if (req->r_old_dentry_dir) { | |
41b02e1f SW |
582 | /* |
583 | * track (and drop pins for) r_old_dentry_dir | |
584 | * separately, since r_old_dentry's d_parent may have | |
585 | * changed between the dir mutex being dropped and | |
586 | * this request being freed. | |
587 | */ | |
588 | ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir), | |
589 | CEPH_CAP_PIN); | |
41b02e1f | 590 | iput(req->r_old_dentry_dir); |
2f2dc053 | 591 | } |
153c8e6b SW |
592 | kfree(req->r_path1); |
593 | kfree(req->r_path2); | |
25e6bae3 YZ |
594 | if (req->r_pagelist) |
595 | ceph_pagelist_release(req->r_pagelist); | |
153c8e6b | 596 | put_request_session(req); |
37151668 | 597 | ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); |
153c8e6b | 598 | kfree(req); |
2f2dc053 SW |
599 | } |
600 | ||
fcd00b68 ID |
601 | DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node) |
602 | ||
2f2dc053 SW |
603 | /* |
604 | * lookup request, bump ref if found. |
605 | * | |
606 | * called under mdsc->mutex. | |
607 | */ | |
fcd00b68 ID |
608 | static struct ceph_mds_request * |
609 | lookup_get_request(struct ceph_mds_client *mdsc, u64 tid) | |
2f2dc053 SW |
610 | { |
611 | struct ceph_mds_request *req; | |
44ca18f2 | 612 | |
fcd00b68 ID |
613 | req = lookup_request(&mdsc->request_tree, tid); |
614 | if (req) | |
615 | ceph_mdsc_get_request(req); | |
44ca18f2 | 616 | |
fcd00b68 | 617 | return req; |
2f2dc053 SW |
618 | } |
619 | ||
620 | /* | |
621 | * Register an in-flight request, and assign a tid. Link to directory | |
622 | * inode we are modifying (if any). |
623 | * | |
624 | * Called under mdsc->mutex. | |
625 | */ | |
626 | static void __register_request(struct ceph_mds_client *mdsc, | |
627 | struct ceph_mds_request *req, | |
628 | struct inode *dir) | |
629 | { | |
e30ee581 ZZ |
630 | int ret = 0; |
631 | ||
2f2dc053 | 632 | req->r_tid = ++mdsc->last_tid; |
e30ee581 ZZ |
633 | if (req->r_num_caps) { |
634 | ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation, | |
635 | req->r_num_caps); | |
636 | if (ret < 0) { | |
637 | pr_err("__register_request %p " | |
638 | "failed to reserve caps: %d\n", req, ret); | |
639 | /* set req->r_err to fail early from __do_request */ | |
640 | req->r_err = ret; | |
641 | return; | |
642 | } | |
643 | } | |
2f2dc053 SW |
644 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
645 | ceph_mdsc_get_request(req); | |
fcd00b68 | 646 | insert_request(&mdsc->request_tree, req); |
2f2dc053 | 647 | |
cb4276cc SW |
648 | req->r_uid = current_fsuid(); |
649 | req->r_gid = current_fsgid(); | |
650 | ||
e8a7b8b1 YZ |
651 | if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK) |
652 | mdsc->oldest_tid = req->r_tid; | |
653 | ||
2f2dc053 | 654 | if (dir) { |
3b663780 | 655 | ihold(dir); |
2f2dc053 | 656 | req->r_unsafe_dir = dir; |
2f2dc053 SW |
657 | } |
658 | } | |
659 | ||
660 | static void __unregister_request(struct ceph_mds_client *mdsc, | |
661 | struct ceph_mds_request *req) | |
662 | { | |
663 | dout("__unregister_request %p tid %lld\n", req, req->r_tid); | |
e8a7b8b1 | 664 | |
df963ea8 JL |
665 | /* Never leave an unregistered request on an unsafe list! */ |
666 | list_del_init(&req->r_unsafe_item); | |
667 | ||
e8a7b8b1 YZ |
668 | if (req->r_tid == mdsc->oldest_tid) { |
669 | struct rb_node *p = rb_next(&req->r_node); | |
670 | mdsc->oldest_tid = 0; | |
671 | while (p) { | |
672 | struct ceph_mds_request *next_req = | |
673 | rb_entry(p, struct ceph_mds_request, r_node); | |
674 | if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) { | |
675 | mdsc->oldest_tid = next_req->r_tid; | |
676 | break; | |
677 | } | |
678 | p = rb_next(p); | |
679 | } | |
680 | } | |
681 | ||
fcd00b68 | 682 | erase_request(&mdsc->request_tree, req); |
2f2dc053 | 683 | |
bc2de10d JL |
684 | if (req->r_unsafe_dir && |
685 | test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { | |
2f2dc053 | 686 | struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); |
2f2dc053 SW |
687 | spin_lock(&ci->i_unsafe_lock); |
688 | list_del_init(&req->r_unsafe_dir_item); | |
689 | spin_unlock(&ci->i_unsafe_lock); | |
4c06ace8 | 690 | } |
bc2de10d JL |
691 | if (req->r_target_inode && |
692 | test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { | |
68cd5b4b YZ |
693 | struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); |
694 | spin_lock(&ci->i_unsafe_lock); | |
695 | list_del_init(&req->r_unsafe_target_item); | |
696 | spin_unlock(&ci->i_unsafe_lock); | |
697 | } | |
3b663780 | 698 | |
4c06ace8 | 699 | if (req->r_unsafe_dir) { |
3b663780 SW |
700 | iput(req->r_unsafe_dir); |
701 | req->r_unsafe_dir = NULL; | |
2f2dc053 | 702 | } |
94aa8ae1 | 703 | |
fc55d2c9 YZ |
704 | complete_all(&req->r_safe_completion); |
705 | ||
94aa8ae1 | 706 | ceph_mdsc_put_request(req); |
2f2dc053 SW |
707 | } |
708 | ||
30c71233 JL |
709 | /* |
710 | * Walk back up the dentry tree until we hit a dentry representing a | |
711 | * non-snapshot inode. We do this using the rcu_read_lock (which must be held | |
712 | * when calling this) to ensure that the objects won't disappear while we're | |
713 | * working with them. Once we hit a candidate dentry, we attempt to take a | |
714 | * reference to it, and return that as the result. | |
715 | */ | |
f1075480 DC |
716 | static struct inode *get_nonsnap_parent(struct dentry *dentry) |
717 | { | |
718 | struct inode *inode = NULL; | |
30c71233 JL |
719 | |
720 | while (dentry && !IS_ROOT(dentry)) { | |
721 | inode = d_inode_rcu(dentry); | |
722 | if (!inode || ceph_snap(inode) == CEPH_NOSNAP) | |
723 | break; | |
724 | dentry = dentry->d_parent; | |
725 | } | |
726 | if (inode) | |
727 | inode = igrab(inode); | |
728 | return inode; | |
729 | } | |
730 | ||
2f2dc053 SW |
731 | /* |
732 | * Choose mds to send request to next. If there is a hint set in the | |
733 | * request (e.g., due to a prior forward hint from the mds), use that. | |
734 | * Otherwise, consult frag tree and/or caps to identify the | |
735 | * appropriate mds. If all else fails, choose randomly. | |
736 | * | |
737 | * Called under mdsc->mutex. | |
738 | */ | |
739 | static int __choose_mds(struct ceph_mds_client *mdsc, | |
740 | struct ceph_mds_request *req) | |
741 | { | |
742 | struct inode *inode; | |
743 | struct ceph_inode_info *ci; | |
744 | struct ceph_cap *cap; | |
745 | int mode = req->r_direct_mode; | |
746 | int mds = -1; | |
747 | u32 hash = req->r_direct_hash; | |
bc2de10d | 748 | bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags); |
2f2dc053 SW |
749 | |
750 | /* | |
751 | * is there a specific mds we should try? ignore hint if we have | |
752 | * no session and the mds is not up (active or recovering). | |
753 | */ | |
754 | if (req->r_resend_mds >= 0 && | |
755 | (__have_session(mdsc, req->r_resend_mds) || | |
756 | ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { | |
757 | dout("choose_mds using resend_mds mds%d\n", | |
758 | req->r_resend_mds); | |
759 | return req->r_resend_mds; | |
760 | } | |
761 | ||
762 | if (mode == USE_RANDOM_MDS) | |
763 | goto random; | |
764 | ||
765 | inode = NULL; | |
766 | if (req->r_inode) { | |
5d37ca14 YZ |
767 | if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) { |
768 | inode = req->r_inode; | |
769 | ihold(inode); | |
770 | } else { | |
38f340cc YZ |
771 | /* req->r_dentry is non-null for LSSNAP request */ |
772 | rcu_read_lock(); | |
773 | inode = get_nonsnap_parent(req->r_dentry); | |
774 | rcu_read_unlock(); | |
775 | dout("__choose_mds using snapdir's parent %p\n", inode); | |
5d37ca14 | 776 | } |
38f340cc | 777 | } else if (req->r_dentry) { |
d79698da | 778 | /* ignore race with rename; old or new d_parent is okay */ |
30c71233 JL |
779 | struct dentry *parent; |
780 | struct inode *dir; | |
781 | ||
782 | rcu_read_lock(); | |
783 | parent = req->r_dentry->d_parent; | |
3dd69aab | 784 | dir = req->r_parent ? : d_inode_rcu(parent); |
eb6bb1c5 | 785 | |
30c71233 JL |
786 | if (!dir || dir->i_sb != mdsc->fsc->sb) { |
787 | /* not this fs or parent went negative */ | |
2b0143b5 | 788 | inode = d_inode(req->r_dentry); |
30c71233 JL |
789 | if (inode) |
790 | ihold(inode); | |
eb6bb1c5 SW |
791 | } else if (ceph_snap(dir) != CEPH_NOSNAP) { |
792 | /* direct snapped/virtual snapdir requests | |
793 | * based on parent dir inode */ | |
30c71233 | 794 | inode = get_nonsnap_parent(parent); |
eb6bb1c5 | 795 | dout("__choose_mds using nonsnap parent %p\n", inode); |
ca18bede | 796 | } else { |
eb6bb1c5 | 797 | /* dentry target */ |
2b0143b5 | 798 | inode = d_inode(req->r_dentry); |
ca18bede YZ |
799 | if (!inode || mode == USE_AUTH_MDS) { |
800 | /* dir + name */ | |
30c71233 | 801 | inode = igrab(dir); |
ca18bede YZ |
802 | hash = ceph_dentry_hash(dir, req->r_dentry); |
803 | is_hash = true; | |
30c71233 JL |
804 | } else { |
805 | ihold(inode); | |
ca18bede | 806 | } |
2f2dc053 | 807 | } |
30c71233 | 808 | rcu_read_unlock(); |
2f2dc053 | 809 | } |
eb6bb1c5 | 810 | |
2f2dc053 SW |
811 | dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, |
812 | (int)hash, mode); | |
813 | if (!inode) | |
814 | goto random; | |
815 | ci = ceph_inode(inode); | |
816 | ||
817 | if (is_hash && S_ISDIR(inode->i_mode)) { | |
818 | struct ceph_inode_frag frag; | |
819 | int found; | |
820 | ||
821 | ceph_choose_frag(ci, hash, &frag, &found); | |
822 | if (found) { | |
823 | if (mode == USE_ANY_MDS && frag.ndist > 0) { | |
824 | u8 r; | |
825 | ||
826 | /* choose a random replica */ | |
827 | get_random_bytes(&r, 1); | |
828 | r %= frag.ndist; | |
829 | mds = frag.dist[r]; | |
830 | dout("choose_mds %p %llx.%llx " | |
831 | "frag %u mds%d (%d/%d)\n", | |
832 | inode, ceph_vinop(inode), | |
d66bbd44 | 833 | frag.frag, mds, |
2f2dc053 | 834 | (int)r, frag.ndist); |
d66bbd44 SW |
835 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
836 | CEPH_MDS_STATE_ACTIVE) | |
30c71233 | 837 | goto out; |
2f2dc053 SW |
838 | } |
839 | ||
840 | /* since this file/dir wasn't known to be | |
841 | * replicated, then we want to look for the | |
842 | * authoritative mds. */ | |
843 | mode = USE_AUTH_MDS; | |
844 | if (frag.mds >= 0) { | |
845 | /* choose auth mds */ | |
846 | mds = frag.mds; | |
847 | dout("choose_mds %p %llx.%llx " | |
848 | "frag %u mds%d (auth)\n", | |
849 | inode, ceph_vinop(inode), frag.frag, mds); | |
d66bbd44 SW |
850 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
851 | CEPH_MDS_STATE_ACTIVE) | |
30c71233 | 852 | goto out; |
2f2dc053 SW |
853 | } |
854 | } | |
855 | } | |
856 | ||
be655596 | 857 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
858 | cap = NULL; |
859 | if (mode == USE_AUTH_MDS) | |
860 | cap = ci->i_auth_cap; | |
861 | if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) | |
862 | cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); | |
863 | if (!cap) { | |
be655596 | 864 | spin_unlock(&ci->i_ceph_lock); |
30c71233 | 865 | iput(inode); |
2f2dc053 SW |
866 | goto random; |
867 | } | |
868 | mds = cap->session->s_mds; | |
869 | dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", | |
870 | inode, ceph_vinop(inode), mds, | |
871 | cap == ci->i_auth_cap ? "auth " : "", cap); | |
be655596 | 872 | spin_unlock(&ci->i_ceph_lock); |
30c71233 JL |
873 | out: |
874 | iput(inode); | |
2f2dc053 SW |
875 | return mds; |
876 | ||
877 | random: | |
878 | mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); | |
879 | dout("choose_mds chose random mds%d\n", mds); | |
880 | return mds; | |
881 | } | |
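When __choose_mds() finds a replicated dir fragment, it picks a replica with one random byte reduced modulo `frag.ndist`. An illustration of that pick; note the mild modulo bias whenever 256 is not divisible by ndist, which is harmless for spreading load:

```c
#include <stdint.h>

/* Illustration of the replica pick in __choose_mds(): the caller has
 * already checked ndist > 0. The modulo bias (at most 1/256 of skew
 * per replica) is irrelevant here, since this only spreads read load. */
static int pick_replica(uint8_t random_byte, int ndist, const int *dist)
{
	return dist[random_byte % ndist];
}
```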
882 | ||
883 | ||
884 | /* | |
885 | * session messages | |
886 | */ | |
887 | static struct ceph_msg *create_session_msg(u32 op, u64 seq) | |
888 | { | |
889 | struct ceph_msg *msg; | |
890 | struct ceph_mds_session_head *h; | |
891 | ||
b61c2763 SW |
892 | msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, |
893 | false); | |
a79832f2 | 894 | if (!msg) { |
2f2dc053 | 895 | pr_err("create_session_msg ENOMEM creating msg\n"); |
a79832f2 | 896 | return NULL; |
2f2dc053 SW |
897 | } |
898 | h = msg->front.iov_base; | |
899 | h->op = cpu_to_le32(op); | |
900 | h->seq = cpu_to_le64(seq); | |
dbd0c8bf JS |
901 | |
902 | return msg; | |
903 | } | |
904 | ||
905 | /* | |
906 | * session message, specialization for CEPH_SESSION_REQUEST_OPEN | |
907 | * to include additional client metadata fields. | |
908 | */ | |
909 | static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq) | |
910 | { | |
911 | struct ceph_msg *msg; | |
912 | struct ceph_mds_session_head *h; | |
913 | int i = -1; | |
914 | int metadata_bytes = 0; | |
915 | int metadata_key_count = 0; | |
916 | struct ceph_options *opt = mdsc->fsc->client->options; | |
3f384954 | 917 | struct ceph_mount_options *fsopt = mdsc->fsc->mount_options; |
dbd0c8bf JS |
918 | void *p; |
919 | ||
a6a5ce4f | 920 | const char* metadata[][2] = { |
717e6f28 YZ |
921 | {"hostname", mdsc->nodename}, |
922 | {"kernel_version", init_utsname()->release}, | |
3f384954 YZ |
923 | {"entity_id", opt->name ? : ""}, |
924 | {"root", fsopt->server_path ? : "/"}, | |
dbd0c8bf JS |
925 | {NULL, NULL} |
926 | }; | |
927 | ||
928 | /* Calculate serialized length of metadata */ | |
929 | metadata_bytes = 4; /* map length */ | |
d37b1d99 | 930 | for (i = 0; metadata[i][0]; ++i) { |
dbd0c8bf JS |
931 | metadata_bytes += 8 + strlen(metadata[i][0]) + |
932 | strlen(metadata[i][1]); | |
933 | metadata_key_count++; | |
934 | } | |
935 | ||
936 | /* Allocate the message */ | |
937 | msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes, | |
938 | GFP_NOFS, false); | |
939 | if (!msg) { | |
940 | pr_err("create_session_msg ENOMEM creating msg\n"); | |
941 | return NULL; | |
942 | } | |
943 | h = msg->front.iov_base; | |
944 | h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN); | |
945 | h->seq = cpu_to_le64(seq); | |
946 | ||
947 | /* | |
948 | * Serialize client metadata into waiting buffer space, using | |
949 | * the format that userspace expects for map<string, string> | |
7cfa0313 JS |
950 | * |
951 | * ClientSession messages with metadata are v2 | |
dbd0c8bf | 952 | */ |
7cfa0313 JS |
953 | msg->hdr.version = cpu_to_le16(2); |
954 | msg->hdr.compat_version = cpu_to_le16(1); | |
dbd0c8bf JS |
955 | |
956 | /* The write pointer, following the session_head structure */ | |
957 | p = msg->front.iov_base + sizeof(*h); | |
958 | ||
959 | /* Number of entries in the map */ | |
960 | ceph_encode_32(&p, metadata_key_count); | |
961 | ||
962 | /* Two length-prefixed strings for each entry in the map */ | |
d37b1d99 | 963 | for (i = 0; metadata[i][0]; ++i) { |
dbd0c8bf JS |
964 | size_t const key_len = strlen(metadata[i][0]); |
965 | size_t const val_len = strlen(metadata[i][1]); | |
966 | ||
967 | ceph_encode_32(&p, key_len); | |
968 | memcpy(p, metadata[i][0], key_len); | |
969 | p += key_len; | |
970 | ceph_encode_32(&p, val_len); | |
971 | memcpy(p, metadata[i][1], val_len); | |
972 | p += val_len; | |
973 | } | |
974 | ||
2f2dc053 SW |
975 | return msg; |
976 | } | |
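create_session_open_msg() serializes the client metadata in the format userspace expects for a `map<string, string>`: a 32-bit entry count, then two length-prefixed (not NUL-terminated) strings per entry. That layout is also why the size estimate charges `8 + strlen(key) + strlen(val)` per entry, the 8 being the two u32 length fields. As a sketch of the layout:

```c
/*
 * Wire layout built by create_session_open_msg() (little-endian):
 *
 *   u32 count                   -- number of map entries
 *   repeated count times:
 *     u32 key_len, key bytes    -- no trailing NUL
 *     u32 val_len, val bytes    -- no trailing NUL
 *
 * so each entry costs exactly 8 + strlen(key) + strlen(val) bytes,
 * matching the metadata_bytes calculation above.
 */
```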
977 | ||
978 | /* | |
979 | * send session open request. | |
980 | * | |
981 | * called under mdsc->mutex | |
982 | */ | |
983 | static int __open_session(struct ceph_mds_client *mdsc, | |
984 | struct ceph_mds_session *session) | |
985 | { | |
986 | struct ceph_msg *msg; | |
987 | int mstate; | |
988 | int mds = session->s_mds; | |
2f2dc053 SW |
989 | |
990 | /* wait for mds to go active? */ | |
991 | mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); | |
992 | dout("open_session to mds%d (%s)\n", mds, | |
993 | ceph_mds_state_name(mstate)); | |
994 | session->s_state = CEPH_MDS_SESSION_OPENING; | |
995 | session->s_renew_requested = jiffies; | |
996 | ||
997 | /* send connect message */ | |
dbd0c8bf | 998 | msg = create_session_open_msg(mdsc, session->s_seq); |
a79832f2 SW |
999 | if (!msg) |
1000 | return -ENOMEM; | |
2f2dc053 | 1001 | ceph_con_send(&session->s_con, msg); |
2f2dc053 SW |
1002 | return 0; |
1003 | } | |
1004 | ||
ed0552a1 SW |
1005 | /* |
1006 | * open sessions for any export targets for the given mds | |
1007 | * | |
1008 | * called under mdsc->mutex | |
1009 | */ | |
5d72d13c YZ |
1010 | static struct ceph_mds_session * |
1011 | __open_export_target_session(struct ceph_mds_client *mdsc, int target) | |
1012 | { | |
1013 | struct ceph_mds_session *session; | |
1014 | ||
1015 | session = __ceph_lookup_mds_session(mdsc, target); | |
1016 | if (!session) { | |
1017 | session = register_session(mdsc, target); | |
1018 | if (IS_ERR(session)) | |
1019 | return session; | |
1020 | } | |
1021 | if (session->s_state == CEPH_MDS_SESSION_NEW || | |
1022 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
1023 | __open_session(mdsc, session); | |
1024 | ||
1025 | return session; | |
1026 | } | |
1027 | ||
1028 | struct ceph_mds_session * | |
1029 | ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target) | |
1030 | { | |
1031 | struct ceph_mds_session *session; | |
1032 | ||
1033 | dout("open_export_target_session to mds%d\n", target); | |
1034 | ||
1035 | mutex_lock(&mdsc->mutex); | |
1036 | session = __open_export_target_session(mdsc, target); | |
1037 | mutex_unlock(&mdsc->mutex); | |
1038 | ||
1039 | return session; | |
1040 | } | |
1041 | ||
ed0552a1 SW |
1042 | static void __open_export_target_sessions(struct ceph_mds_client *mdsc, |
1043 | struct ceph_mds_session *session) | |
1044 | { | |
1045 | struct ceph_mds_info *mi; | |
1046 | struct ceph_mds_session *ts; | |
1047 | int i, mds = session->s_mds; | |
ed0552a1 | 1048 | |
76201b63 | 1049 | if (mds >= mdsc->mdsmap->m_num_mds) |
ed0552a1 | 1050 | return; |
5d72d13c | 1051 | |
ed0552a1 SW |
1052 | mi = &mdsc->mdsmap->m_info[mds]; |
1053 | dout("open_export_target_sessions for mds%d (%d targets)\n", | |
1054 | session->s_mds, mi->num_export_targets); | |
1055 | ||
1056 | for (i = 0; i < mi->num_export_targets; i++) { | |
5d72d13c YZ |
1057 | ts = __open_export_target_session(mdsc, mi->export_targets[i]); |
1058 | if (!IS_ERR(ts)) | |
1059 | ceph_put_mds_session(ts); | |
ed0552a1 SW |
1060 | } |
1061 | } | |
1062 | ||
154f42c2 SW |
1063 | void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, |
1064 | struct ceph_mds_session *session) | |
1065 | { | |
1066 | mutex_lock(&mdsc->mutex); | |
1067 | __open_export_target_sessions(mdsc, session); | |
1068 | mutex_unlock(&mdsc->mutex); | |
1069 | } | |
1070 | ||
2f2dc053 SW |
1071 | /* |
1072 | * session caps | |
1073 | */ | |
1074 | ||
c8a96a31 JL |
1075 | static void detach_cap_releases(struct ceph_mds_session *session, |
1076 | struct list_head *target) | |
2f2dc053 | 1077 | { |
c8a96a31 JL |
1078 | lockdep_assert_held(&session->s_cap_lock); |
1079 | ||
1080 | list_splice_init(&session->s_cap_releases, target); | |
745a8e3b | 1081 | session->s_num_cap_releases = 0; |
c8a96a31 JL |
1082 | dout("dispose_cap_releases mds%d\n", session->s_mds); |
1083 | } | |
2f2dc053 | 1084 | |
c8a96a31 JL |
1085 | static void dispose_cap_releases(struct ceph_mds_client *mdsc, |
1086 | struct list_head *dispose) | |
1087 | { | |
1088 | while (!list_empty(dispose)) { | |
745a8e3b YZ |
1089 | struct ceph_cap *cap; |
1090 | /* zero out the in-progress message */ | |
c8a96a31 | 1091 | cap = list_first_entry(dispose, struct ceph_cap, session_caps); |
745a8e3b YZ |
1092 | list_del(&cap->session_caps); |
1093 | ceph_put_cap(mdsc, cap); | |
2f2dc053 | 1094 | } |
2f2dc053 SW |
1095 | } |
1096 | ||
1c841a96 YZ |
1097 | static void cleanup_session_requests(struct ceph_mds_client *mdsc, |
1098 | struct ceph_mds_session *session) | |
1099 | { | |
1100 | struct ceph_mds_request *req; | |
1101 | struct rb_node *p; | |
1102 | ||
1103 | dout("cleanup_session_requests mds%d\n", session->s_mds); | |
1104 | mutex_lock(&mdsc->mutex); | |
1105 | while (!list_empty(&session->s_unsafe)) { | |
1106 | req = list_first_entry(&session->s_unsafe, | |
1107 | struct ceph_mds_request, r_unsafe_item); | |
3e0708b9 YZ |
1108 | pr_warn_ratelimited(" dropping unsafe request %llu\n", |
1109 | req->r_tid); | |
1c841a96 YZ |
1110 | __unregister_request(mdsc, req); |
1111 | } | |
1112 | /* zero r_attempts, so kick_requests() will re-send requests */ | |
1113 | p = rb_first(&mdsc->request_tree); | |
1114 | while (p) { | |
1115 | req = rb_entry(p, struct ceph_mds_request, r_node); | |
1116 | p = rb_next(p); | |
1117 | if (req->r_session && | |
1118 | req->r_session->s_mds == session->s_mds) | |
1119 | req->r_attempts = 0; | |
1120 | } | |
1121 | mutex_unlock(&mdsc->mutex); | |
1122 | } | |
1123 | ||
2f2dc053 | 1124 | /* |
f818a736 SW |
1125 | * Helper to safely iterate over all caps associated with a session, with |
1126 | * special care taken to handle a racing __ceph_remove_cap(). | |
2f2dc053 | 1127 | * |
f818a736 | 1128 | * Caller must hold session s_mutex. |
2f2dc053 SW |
1129 | */ |
1130 | static int iterate_session_caps(struct ceph_mds_session *session, | |
1131 | int (*cb)(struct inode *, struct ceph_cap *, | |
1132 | void *), void *arg) | |
1133 | { | |
7c1332b8 SW |
1134 | struct list_head *p; |
1135 | struct ceph_cap *cap; | |
1136 | struct inode *inode, *last_inode = NULL; | |
1137 | struct ceph_cap *old_cap = NULL; | |
2f2dc053 SW |
1138 | int ret; |
1139 | ||
1140 | dout("iterate_session_caps %p mds%d\n", session, session->s_mds); | |
1141 | spin_lock(&session->s_cap_lock); | |
7c1332b8 SW |
1142 | p = session->s_caps.next; |
1143 | while (p != &session->s_caps) { | |
1144 | cap = list_entry(p, struct ceph_cap, session_caps); | |
2f2dc053 | 1145 | inode = igrab(&cap->ci->vfs_inode); |
7c1332b8 SW |
1146 | if (!inode) { |
1147 | p = p->next; | |
2f2dc053 | 1148 | continue; |
7c1332b8 SW |
1149 | } |
1150 | session->s_cap_iterator = cap; | |
2f2dc053 | 1151 | spin_unlock(&session->s_cap_lock); |
7c1332b8 SW |
1152 | |
1153 | if (last_inode) { | |
1154 | iput(last_inode); | |
1155 | last_inode = NULL; | |
1156 | } | |
1157 | if (old_cap) { | |
37151668 | 1158 | ceph_put_cap(session->s_mdsc, old_cap); |
7c1332b8 SW |
1159 | old_cap = NULL; |
1160 | } | |
1161 | ||
2f2dc053 | 1162 | ret = cb(inode, cap, arg); |
7c1332b8 SW |
1163 | last_inode = inode; |
1164 | ||
2f2dc053 | 1165 | spin_lock(&session->s_cap_lock); |
7c1332b8 | 1166 | p = p->next; |
d37b1d99 | 1167 | if (!cap->ci) { |
7c1332b8 SW |
1168 | dout("iterate_session_caps finishing cap %p removal\n", |
1169 | cap); | |
1170 | BUG_ON(cap->session != session); | |
745a8e3b | 1171 | cap->session = NULL; |
7c1332b8 SW |
1172 | list_del_init(&cap->session_caps); |
1173 | session->s_nr_caps--; | |
745a8e3b YZ |
1174 | if (cap->queue_release) { |
1175 | list_add_tail(&cap->session_caps, | |
1176 | &session->s_cap_releases); | |
1177 | session->s_num_cap_releases++; | |
1178 | } else { | |
1179 | old_cap = cap; /* put_cap it w/o locks held */ | |
1180 | } | |
7c1332b8 | 1181 | } |
5dacf091 SW |
1182 | if (ret < 0) |
1183 | goto out; | |
2f2dc053 | 1184 | } |
5dacf091 SW |
1185 | ret = 0; |
1186 | out: | |
7c1332b8 | 1187 | session->s_cap_iterator = NULL; |
2f2dc053 | 1188 | spin_unlock(&session->s_cap_lock); |
7c1332b8 | 1189 | |
e96a650a | 1190 | iput(last_inode); |
7c1332b8 | 1191 | if (old_cap) |
37151668 | 1192 | ceph_put_cap(session->s_mdsc, old_cap); |
7c1332b8 | 1193 | |
5dacf091 | 1194 | return ret; |
2f2dc053 SW |
1195 | } |
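iterate_session_caps() is worth reading closely: it pins the inode and parks a cursor (`s_cap_iterator`) under `s_cap_lock`, drops the lock to run the callback, then retakes it to advance. The `last_inode`/`old_cap` variables defer `iput()` and `ceph_put_cap()` to the next pass because both may sleep, so neither may run under the spinlock. A skeleton of that shape, with hypothetical names:

```c
#include <linux/list.h>
#include <linux/spinlock.h>

struct item { struct list_head node; };

/* Hypothetical skeleton of the iterate_session_caps() pattern. The
 * real code additionally pins each object (igrab) and publishes a
 * cursor so a racing removal knows to leave the current entry alone. */
static void iterate(spinlock_t *lock, struct list_head *head,
		    void (*cb)(struct item *),
		    void (*put)(struct item *))   /* put() may sleep */
{
	struct item *deferred = NULL;
	struct list_head *p;

	spin_lock(lock);
	for (p = head->next; p != head; p = p->next) {
		struct item *it = list_entry(p, struct item, node);

		spin_unlock(lock);
		if (deferred)
			put(deferred);   /* sleeping cleanup, lock dropped */
		cb(it);
		deferred = it;           /* release on the next pass */
		spin_lock(lock);
	}
	spin_unlock(lock);
	if (deferred)
		put(deferred);
}
```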
1196 | ||
1197 | static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, | |
6c99f254 | 1198 | void *arg) |
2f2dc053 | 1199 | { |
6c93df5d | 1200 | struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg; |
2f2dc053 | 1201 | struct ceph_inode_info *ci = ceph_inode(inode); |
553adfd9 | 1202 | LIST_HEAD(to_remove); |
6c93df5d YZ |
1203 | bool drop = false; |
1204 | bool invalidate = false; | |
6c99f254 | 1205 | |
2f2dc053 SW |
1206 | dout("removing cap %p, ci is %p, inode is %p\n", |
1207 | cap, ci, &ci->vfs_inode); | |
be655596 | 1208 | spin_lock(&ci->i_ceph_lock); |
a096b09a | 1209 | __ceph_remove_cap(cap, false); |
571ade33 | 1210 | if (!ci->i_auth_cap) { |
553adfd9 | 1211 | struct ceph_cap_flush *cf; |
6c93df5d | 1212 | struct ceph_mds_client *mdsc = fsc->mdsc; |
6c99f254 | 1213 | |
77310320 YZ |
1214 | ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; |
1215 | ||
6c93df5d | 1216 | if (ci->i_wrbuffer_ref > 0 && |
52953d55 | 1217 | READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) |
6c93df5d YZ |
1218 | invalidate = true; |
1219 | ||
e4500b5e YZ |
1220 | while (!list_empty(&ci->i_cap_flush_list)) { |
1221 | cf = list_first_entry(&ci->i_cap_flush_list, | |
1222 | struct ceph_cap_flush, i_list); | |
8cdcc07d | 1223 | list_move(&cf->i_list, &to_remove); |
553adfd9 YZ |
1224 | } |
1225 | ||
6c99f254 | 1226 | spin_lock(&mdsc->cap_dirty_lock); |
8310b089 | 1227 | |
e4500b5e YZ |
1228 | list_for_each_entry(cf, &to_remove, i_list) |
1229 | list_del(&cf->g_list); | |
8310b089 | 1230 | |
6c99f254 | 1231 | if (!list_empty(&ci->i_dirty_item)) { |
3e0708b9 YZ |
1232 | pr_warn_ratelimited( |
1233 | " dropping dirty %s state for %p %lld\n", | |
6c99f254 SW |
1234 | ceph_cap_string(ci->i_dirty_caps), |
1235 | inode, ceph_ino(inode)); | |
1236 | ci->i_dirty_caps = 0; | |
1237 | list_del_init(&ci->i_dirty_item); | |
6c93df5d | 1238 | drop = true; |
6c99f254 SW |
1239 | } |
1240 | if (!list_empty(&ci->i_flushing_item)) { | |
3e0708b9 YZ |
1241 | pr_warn_ratelimited( |
1242 | " dropping dirty+flushing %s state for %p %lld\n", | |
6c99f254 SW |
1243 | ceph_cap_string(ci->i_flushing_caps), |
1244 | inode, ceph_ino(inode)); | |
1245 | ci->i_flushing_caps = 0; | |
1246 | list_del_init(&ci->i_flushing_item); | |
1247 | mdsc->num_cap_flushing--; | |
6c93df5d | 1248 | drop = true; |
6c99f254 | 1249 | } |
6c99f254 | 1250 | spin_unlock(&mdsc->cap_dirty_lock); |
553adfd9 | 1251 | |
b3f8d68f YZ |
1252 | if (atomic_read(&ci->i_filelock_ref) > 0) { |
1253 | /* make further file lock syscall return -EIO */ | |
1254 | ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK; | |
1255 | pr_warn_ratelimited(" dropping file locks for %p %lld\n", | |
1256 | inode, ceph_ino(inode)); | |
1257 | } | |
1258 | ||
f66fd9f0 | 1259 | if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) { |
e4500b5e | 1260 | list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove); |
f66fd9f0 YZ |
1261 | ci->i_prealloc_cap_flush = NULL; |
1262 | } | |
6c99f254 | 1263 | } |
be655596 | 1264 | spin_unlock(&ci->i_ceph_lock); |
553adfd9 YZ |
1265 | while (!list_empty(&to_remove)) { |
1266 | struct ceph_cap_flush *cf; | |
1267 | cf = list_first_entry(&to_remove, | |
e4500b5e YZ |
1268 | struct ceph_cap_flush, i_list); |
1269 | list_del(&cf->i_list); | |
f66fd9f0 | 1270 | ceph_free_cap_flush(cf); |
553adfd9 | 1271 | } |
77310320 YZ |
1272 | |
1273 | wake_up_all(&ci->i_cap_wq); | |
6c93df5d YZ |
1274 | if (invalidate) |
1275 | ceph_queue_invalidate(inode); | |
77310320 | 1276 | if (drop) |
6c99f254 | 1277 | iput(inode); |
2f2dc053 SW |
1278 | return 0; |
1279 | } | |
1280 | ||
1281 | /* | |
1282 | * caller must hold session s_mutex | |
1283 | */ | |
1284 | static void remove_session_caps(struct ceph_mds_session *session) | |
1285 | { | |
6c93df5d YZ |
1286 | struct ceph_fs_client *fsc = session->s_mdsc->fsc; |
1287 | struct super_block *sb = fsc->sb; | |
c8a96a31 JL |
1288 | LIST_HEAD(dispose); |
1289 | ||
2f2dc053 | 1290 | dout("remove_session_caps on %p\n", session); |
6c93df5d | 1291 | iterate_session_caps(session, remove_session_caps_cb, fsc); |
6f60f889 | 1292 | |
c8799fc4 YZ |
1293 | wake_up_all(&fsc->mdsc->cap_flushing_wq); |
1294 | ||
6f60f889 YZ |
1295 | spin_lock(&session->s_cap_lock); |
1296 | if (session->s_nr_caps > 0) { | |
6f60f889 YZ |
1297 | struct inode *inode; |
1298 | struct ceph_cap *cap, *prev = NULL; | |
1299 | struct ceph_vino vino; | |
1300 | /* | |
1301 | * iterate_session_caps() skips inodes that are being | |
1302 | * deleted, we need to wait until deletions are complete. | |
1303 | * __wait_on_freeing_inode() is designed for the job, | |
1304 | * but it is not exported, so use lookup inode function | |
1305 | * to access it. | |
1306 | */ | |
1307 | while (!list_empty(&session->s_caps)) { | |
1308 | cap = list_entry(session->s_caps.next, | |
1309 | struct ceph_cap, session_caps); | |
1310 | if (cap == prev) | |
1311 | break; | |
1312 | prev = cap; | |
1313 | vino = cap->ci->i_vino; | |
1314 | spin_unlock(&session->s_cap_lock); | |
1315 | ||
ed284c49 | 1316 | inode = ceph_find_inode(sb, vino); |
6f60f889 YZ |
1317 | iput(inode); |
1318 | ||
1319 | spin_lock(&session->s_cap_lock); | |
1320 | } | |
1321 | } | |
745a8e3b YZ |
1322 | |
1323 | // drop cap expires and unlock s_cap_lock | |
c8a96a31 | 1324 | detach_cap_releases(session, &dispose); |
6f60f889 | 1325 | |
2f2dc053 | 1326 | BUG_ON(session->s_nr_caps > 0); |
6c99f254 | 1327 | BUG_ON(!list_empty(&session->s_cap_flushing)); |
c8a96a31 JL |
1328 | spin_unlock(&session->s_cap_lock); |
1329 | dispose_cap_releases(session->s_mdsc, &dispose); | |
2f2dc053 SW |
1330 | } |
1331 | ||
1332 | /* | |
1333 | * wake up any threads waiting on this session's caps. if the cap is | |
1334 | * old (didn't get renewed on the client reconnect), remove it now. | |
1335 | * | |
1336 | * caller must hold s_mutex. | |
1337 | */ | |
1338 | static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, | |
1339 | void *arg) | |
1340 | { | |
0dc2570f SW |
1341 | struct ceph_inode_info *ci = ceph_inode(inode); |
1342 | ||
0dc2570f | 1343 | if (arg) { |
be655596 | 1344 | spin_lock(&ci->i_ceph_lock); |
0dc2570f SW |
1345 | ci->i_wanted_max_size = 0; |
1346 | ci->i_requested_max_size = 0; | |
be655596 | 1347 | spin_unlock(&ci->i_ceph_lock); |
0dc2570f | 1348 | } |
e5360309 | 1349 | wake_up_all(&ci->i_cap_wq); |
2f2dc053 SW |
1350 | return 0; |
1351 | } | |
1352 | ||
0dc2570f SW |
1353 | static void wake_up_session_caps(struct ceph_mds_session *session, |
1354 | int reconnect) | |
2f2dc053 SW |
1355 | { |
1356 | dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); | |
0dc2570f SW |
1357 | iterate_session_caps(session, wake_up_session_cb, |
1358 | (void *)(unsigned long)reconnect); | |
2f2dc053 SW |
1359 | } |
1360 | ||
1361 | /* | |
1362 | * Send periodic message to MDS renewing all currently held caps. The | |
1363 | * ack will reset the expiration for all caps from this session. | |
1364 | * | |
1365 | * caller holds s_mutex | |
1366 | */ | |
1367 | static int send_renew_caps(struct ceph_mds_client *mdsc, | |
1368 | struct ceph_mds_session *session) | |
1369 | { | |
1370 | struct ceph_msg *msg; | |
1371 | int state; | |
1372 | ||
1373 | if (time_after_eq(jiffies, session->s_cap_ttl) && | |
1374 | time_after_eq(session->s_cap_ttl, session->s_renew_requested)) | |
1375 | pr_info("mds%d caps stale\n", session->s_mds); | |
e4cb4cb8 | 1376 | session->s_renew_requested = jiffies; |
2f2dc053 SW |
1377 | |
1378 | /* do not try to renew caps until a recovering mds has reconnected | |
1379 | * with its clients. */ | |
1380 | state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); | |
1381 | if (state < CEPH_MDS_STATE_RECONNECT) { | |
1382 | dout("send_renew_caps ignoring mds%d (%s)\n", | |
1383 | session->s_mds, ceph_mds_state_name(state)); | |
1384 | return 0; | |
1385 | } | |
1386 | ||
1387 | dout("send_renew_caps to mds%d (%s)\n", session->s_mds, | |
1388 | ceph_mds_state_name(state)); | |
2f2dc053 SW |
1389 | msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, |
1390 | ++session->s_renew_seq); | |
a79832f2 SW |
1391 | if (!msg) |
1392 | return -ENOMEM; | |
2f2dc053 SW |
1393 | ceph_con_send(&session->s_con, msg); |
1394 | return 0; | |
1395 | } | |
1396 | ||
186e4f7a YZ |
1397 | static int send_flushmsg_ack(struct ceph_mds_client *mdsc, |
1398 | struct ceph_mds_session *session, u64 seq) | |
1399 | { | |
1400 | struct ceph_msg *msg; | |
1401 | ||
1402 | dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n", | |
a687ecaf | 1403 | session->s_mds, ceph_session_state_name(session->s_state), seq); |
186e4f7a YZ |
1404 | msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); |
1405 | if (!msg) | |
1406 | return -ENOMEM; | |
1407 | ceph_con_send(&session->s_con, msg); | |
1408 | return 0; | |
1409 | } | |
1410 | ||
1411 | ||
2f2dc053 SW |
1412 | /* |
1413 | * Note new cap ttl, and any transition from stale -> not stale (fresh?). | |
0dc2570f SW |
1414 | * |
1415 | * Called under session->s_mutex | |
2f2dc053 SW |
1416 | */ |
1417 | static void renewed_caps(struct ceph_mds_client *mdsc, | |
1418 | struct ceph_mds_session *session, int is_renew) | |
1419 | { | |
1420 | int was_stale; | |
1421 | int wake = 0; | |
1422 | ||
1423 | spin_lock(&session->s_cap_lock); | |
1ce208a6 | 1424 | was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl); |
2f2dc053 SW |
1425 | |
1426 | session->s_cap_ttl = session->s_renew_requested + | |
1427 | mdsc->mdsmap->m_session_timeout*HZ; | |
1428 | ||
1429 | if (was_stale) { | |
1430 | if (time_before(jiffies, session->s_cap_ttl)) { | |
1431 | pr_info("mds%d caps renewed\n", session->s_mds); | |
1432 | wake = 1; | |
1433 | } else { | |
1434 | pr_info("mds%d caps still stale\n", session->s_mds); | |
1435 | } | |
1436 | } | |
1437 | dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n", | |
1438 | session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh", | |
1439 | time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh"); | |
1440 | spin_unlock(&session->s_cap_lock); | |
1441 | ||
1442 | if (wake) | |
0dc2570f | 1443 | wake_up_session_caps(session, 0); |
2f2dc053 SW |
1444 | } |
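Note that renewed_caps() measures the new TTL from `s_renew_requested`, the time the RENEWCAPS message went out, not from when the ack arrived, so the round trip counts against the lease. A worked example, assuming a 60-second `m_session_timeout`:

```c
/*
 * Worked example (assuming m_session_timeout == 60 seconds):
 *
 *   t0        RENEWCAPS sent  ->  s_renew_requested = t0
 *   t0 + rtt  ack arrives     ->  s_cap_ttl = t0 + 60*HZ
 *
 * The lease runs from the request, not the ack, so a slow ack eats
 * into it; caps count as stale once jiffies >= s_cap_ttl.
 */
```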
1445 | ||
1446 | /* | |
1447 | * send a session close request | |
1448 | */ | |
1449 | static int request_close_session(struct ceph_mds_client *mdsc, | |
1450 | struct ceph_mds_session *session) | |
1451 | { | |
1452 | struct ceph_msg *msg; | |
2f2dc053 SW |
1453 | |
1454 | dout("request_close_session mds%d state %s seq %lld\n", | |
a687ecaf | 1455 | session->s_mds, ceph_session_state_name(session->s_state), |
2f2dc053 SW |
1456 | session->s_seq); |
1457 | msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); | |
a79832f2 SW |
1458 | if (!msg) |
1459 | return -ENOMEM; | |
1460 | ceph_con_send(&session->s_con, msg); | |
fcff415c | 1461 | return 1; |
2f2dc053 SW |
1462 | } |
1463 | ||
1464 | /* | |
1465 | * Called with s_mutex held. | |
1466 | */ | |
1467 | static int __close_session(struct ceph_mds_client *mdsc, | |
1468 | struct ceph_mds_session *session) | |
1469 | { | |
1470 | if (session->s_state >= CEPH_MDS_SESSION_CLOSING) | |
1471 | return 0; | |
1472 | session->s_state = CEPH_MDS_SESSION_CLOSING; | |
1473 | return request_close_session(mdsc, session); | |
1474 | } | |
1475 | ||
040d7860 YZ |
1476 | static bool drop_negative_children(struct dentry *dentry) |
1477 | { | |
1478 | struct dentry *child; | |
1479 | bool all_negative = true; | |
1480 | ||
1481 | if (!d_is_dir(dentry)) | |
1482 | goto out; | |
1483 | ||
1484 | spin_lock(&dentry->d_lock); | |
1485 | list_for_each_entry(child, &dentry->d_subdirs, d_child) { | |
1486 | if (d_really_is_positive(child)) { | |
1487 | all_negative = false; | |
1488 | break; | |
1489 | } | |
1490 | } | |
1491 | spin_unlock(&dentry->d_lock); | |
1492 | ||
1493 | if (all_negative) | |
1494 | shrink_dcache_parent(dentry); | |
1495 | out: | |
1496 | return all_negative; | |
1497 | } | |
1498 | ||
2f2dc053 SW |
1499 | /* |
1500 | * Trim old(er) caps. | |
1501 | * | |
1502 | * Because we can't cache an inode without one or more caps, we do | |
1503 | * this indirectly: if a cap is unused, we prune its aliases, at which | |
1504 | * point the inode will hopefully get dropped too. |
1505 | * | |
1506 | * Yes, this is a bit sloppy. Our only real goal here is to respond to | |
1507 | * memory pressure from the MDS, though, so it needn't be perfect. | |
1508 | */ | |
1509 | static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) | |
1510 | { | |
1511 | struct ceph_mds_session *session = arg; | |
1512 | struct ceph_inode_info *ci = ceph_inode(inode); | |
979abfdd | 1513 | int used, wanted, oissued, mine; |
2f2dc053 SW |
1514 | |
1515 | if (session->s_trim_caps <= 0) | |
1516 | return -1; | |
1517 | ||
be655596 | 1518 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
1519 | mine = cap->issued | cap->implemented; |
1520 | used = __ceph_caps_used(ci); | |
979abfdd | 1521 | wanted = __ceph_caps_file_wanted(ci); |
2f2dc053 SW |
1522 | oissued = __ceph_caps_issued_other(ci, cap); |
1523 | ||
979abfdd | 1524 | dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n", |
2f2dc053 | 1525 | inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), |
979abfdd YZ |
1526 | ceph_cap_string(used), ceph_cap_string(wanted)); |
1527 | if (cap == ci->i_auth_cap) { | |
622f3e25 YZ |
1528 | if (ci->i_dirty_caps || ci->i_flushing_caps || |
1529 | !list_empty(&ci->i_cap_snaps)) | |
979abfdd YZ |
1530 | goto out; |
1531 | if ((used | wanted) & CEPH_CAP_ANY_WR) | |
1532 | goto out; | |
89aa5930 YZ |
1533 | /* Note: it's possible that i_filelock_ref becomes non-zero |
1534 | * after dropping auth caps. It doesn't hurt, because the reply | |
1535 | * to the lock MDS request will re-add the auth caps. */ | |
1536 | if (atomic_read(&ci->i_filelock_ref) > 0) | |
1537 | goto out; | |
979abfdd | 1538 | } |
5e804ac4 YZ |
1539 | /* The inode has cached pages, but it's no longer used. |
1540 | * We can safely drop it. */ | |
1541 | if (wanted == 0 && used == CEPH_CAP_FILE_CACHE && | |
1542 | !(oissued & CEPH_CAP_FILE_CACHE)) { | |
1543 | used = 0; | |
1544 | oissued = 0; | |
1545 | } | |
979abfdd | 1546 | if ((used | wanted) & ~oissued & mine) |
2f2dc053 SW |
1547 | goto out; /* we need these caps */ |
1548 | ||
2f2dc053 SW |
1549 | if (oissued) { |
1550 | /* we aren't the only cap.. just remove us */ | |
a096b09a | 1551 | __ceph_remove_cap(cap, true); |
040d7860 | 1552 | session->s_trim_caps--; |
2f2dc053 | 1553 | } else { |
040d7860 | 1554 | struct dentry *dentry; |
5e804ac4 | 1555 | /* try dropping referring dentries */ |
be655596 | 1556 | spin_unlock(&ci->i_ceph_lock); |
040d7860 YZ |
1557 | dentry = d_find_any_alias(inode); |
1558 | if (dentry && drop_negative_children(dentry)) { | |
1559 | int count; | |
1560 | dput(dentry); | |
1561 | d_prune_aliases(inode); | |
1562 | count = atomic_read(&inode->i_count); | |
1563 | if (count == 1) | |
1564 | session->s_trim_caps--; | |
1565 | dout("trim_caps_cb %p cap %p pruned, count now %d\n", | |
1566 | inode, cap, count); | |
1567 | } else { | |
1568 | dput(dentry); | |
1569 | } | |
2f2dc053 SW |
1570 | return 0; |
1571 | } | |
1572 | ||
1573 | out: | |
be655596 | 1574 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
1575 | return 0; |
1576 | } | |
1577 | ||
1578 | /* | |
1579 | * Trim session cap count down to some max number. | |
1580 | */ | |
e30ee581 ZZ |
1581 | int ceph_trim_caps(struct ceph_mds_client *mdsc, |
1582 | struct ceph_mds_session *session, | |
1583 | int max_caps) | |
2f2dc053 SW |
1584 | { |
1585 | int trim_caps = session->s_nr_caps - max_caps; | |
1586 | ||
1587 | dout("trim_caps mds%d start: %d / %d, trim %d\n", | |
1588 | session->s_mds, session->s_nr_caps, max_caps, trim_caps); | |
1589 | if (trim_caps > 0) { | |
1590 | session->s_trim_caps = trim_caps; | |
1591 | iterate_session_caps(session, trim_caps_cb, session); | |
1592 | dout("trim_caps mds%d done: %d / %d, trimmed %d\n", | |
1593 | session->s_mds, session->s_nr_caps, max_caps, | |
1594 | trim_caps - session->s_trim_caps); | |
5dacf091 | 1595 | session->s_trim_caps = 0; |
2f2dc053 | 1596 | } |
a56371d9 | 1597 | |
a56371d9 | 1598 | ceph_send_cap_releases(mdsc, session); |
2f2dc053 SW |
1599 | return 0; |
1600 | } | |
1601 | ||
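/*
 * Wait condition for wait_caps_flush() below: returns 0 while the head
 * of mdsc->cap_flush_list still carries a tid at or below
 * @want_flush_tid, and 1 once everything up to that tid has flushed.
 */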
8310b089 YZ |
1602 | static int check_caps_flush(struct ceph_mds_client *mdsc, |
1603 | u64 want_flush_tid) | |
1604 | { | |
8310b089 YZ |
1605 | int ret = 1; |
1606 | ||
1607 | spin_lock(&mdsc->cap_dirty_lock); | |
e4500b5e YZ |
1608 | if (!list_empty(&mdsc->cap_flush_list)) { |
1609 | struct ceph_cap_flush *cf = | |
1610 | list_first_entry(&mdsc->cap_flush_list, | |
1611 | struct ceph_cap_flush, g_list); | |
1612 | if (cf->tid <= want_flush_tid) { | |
1613 | dout("check_caps_flush still flushing tid " | |
1614 | "%llu <= %llu\n", cf->tid, want_flush_tid); | |
1615 | ret = 0; | |
1616 | } | |
8310b089 YZ |
1617 | } |
1618 | spin_unlock(&mdsc->cap_dirty_lock); | |
1619 | return ret; | |
d3383a8e YZ |
1620 | } |
1621 | ||
2f2dc053 SW |
1622 | /* |
1623 | * wait for all dirty inode data to be flushed to disk. | |
1624 | * | |
8310b089 | 1625 | * blocks until cap flushes have caught up with want_flush_tid |
2f2dc053 | 1626 | */ |
affbc19a | 1627 | static void wait_caps_flush(struct ceph_mds_client *mdsc, |
0e294387 | 1628 | u64 want_flush_tid) |
2f2dc053 | 1629 | { |
0e294387 | 1630 | dout("check_caps_flush want %llu\n", want_flush_tid); |
8310b089 YZ |
1631 | |
1632 | wait_event(mdsc->cap_flushing_wq, | |
1633 | check_caps_flush(mdsc, want_flush_tid)); | |
1634 | ||
1635 | dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid); | |
2f2dc053 SW |
1636 | } |
1637 | ||
1638 | /* | |
1639 | * called under s_mutex | |
1640 | */ | |
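/*
 * Batches the queued releases into CEPH_MSG_CLIENT_CAPRELEASE messages:
 * each message carries a ceph_mds_cap_release head, up to
 * CEPH_CAPS_PER_RELEASE items, and (in the v2 encoding) a trailing
 * cap_barrier copied from the OSD client's epoch barrier.
 */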
3d7ded4d SW |
1641 | void ceph_send_cap_releases(struct ceph_mds_client *mdsc, |
1642 | struct ceph_mds_session *session) | |
2f2dc053 | 1643 | { |
745a8e3b YZ |
1644 | struct ceph_msg *msg = NULL; |
1645 | struct ceph_mds_cap_release *head; | |
1646 | struct ceph_mds_cap_item *item; | |
92475f05 | 1647 | struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; |
745a8e3b YZ |
1648 | struct ceph_cap *cap; |
1649 | LIST_HEAD(tmp_list); | |
1650 | int num_cap_releases; | |
92475f05 JL |
1651 | __le32 barrier, *cap_barrier; |
1652 | ||
1653 | down_read(&osdc->lock); | |
1654 | barrier = cpu_to_le32(osdc->epoch_barrier); | |
1655 | up_read(&osdc->lock); | |
2f2dc053 | 1656 | |
0f8605f2 | 1657 | spin_lock(&session->s_cap_lock); |
745a8e3b YZ |
1658 | again: |
1659 | list_splice_init(&session->s_cap_releases, &tmp_list); | |
1660 | num_cap_releases = session->s_num_cap_releases; | |
1661 | session->s_num_cap_releases = 0; | |
2f2dc053 | 1662 | spin_unlock(&session->s_cap_lock); |
e01a5946 | 1663 | |
745a8e3b YZ |
1664 | while (!list_empty(&tmp_list)) { |
1665 | if (!msg) { | |
1666 | msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, | |
09cbfeaf | 1667 | PAGE_SIZE, GFP_NOFS, false); |
745a8e3b YZ |
1668 | if (!msg) |
1669 | goto out_err; | |
1670 | head = msg->front.iov_base; | |
1671 | head->num = cpu_to_le32(0); | |
1672 | msg->front.iov_len = sizeof(*head); | |
92475f05 JL |
1673 | |
1674 | msg->hdr.version = cpu_to_le16(2); | |
1675 | msg->hdr.compat_version = cpu_to_le16(1); | |
745a8e3b | 1676 | } |
92475f05 | 1677 | |
745a8e3b YZ |
1678 | cap = list_first_entry(&tmp_list, struct ceph_cap, |
1679 | session_caps); | |
1680 | list_del(&cap->session_caps); | |
1681 | num_cap_releases--; | |
e01a5946 | 1682 | |
00bd8edb | 1683 | head = msg->front.iov_base; |
745a8e3b YZ |
1684 | le32_add_cpu(&head->num, 1); |
1685 | item = msg->front.iov_base + msg->front.iov_len; | |
1686 | item->ino = cpu_to_le64(cap->cap_ino); | |
1687 | item->cap_id = cpu_to_le64(cap->cap_id); | |
1688 | item->migrate_seq = cpu_to_le32(cap->mseq); | |
1689 | item->seq = cpu_to_le32(cap->issue_seq); | |
1690 | msg->front.iov_len += sizeof(*item); | |
1691 | ||
1692 | ceph_put_cap(mdsc, cap); | |
1693 | ||
1694 | if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { | |
92475f05 JL |
1695 | // Append cap_barrier field |
1696 | cap_barrier = msg->front.iov_base + msg->front.iov_len; | |
1697 | *cap_barrier = barrier; | |
1698 | msg->front.iov_len += sizeof(*cap_barrier); | |
1699 | ||
745a8e3b YZ |
1700 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); |
1701 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1702 | ceph_con_send(&session->s_con, msg); | |
1703 | msg = NULL; | |
1704 | } | |
00bd8edb | 1705 | } |
e01a5946 | 1706 | |
745a8e3b | 1707 | BUG_ON(num_cap_releases != 0); |
e01a5946 | 1708 | |
745a8e3b YZ |
1709 | spin_lock(&session->s_cap_lock); |
1710 | if (!list_empty(&session->s_cap_releases)) | |
1711 | goto again; | |
1712 | spin_unlock(&session->s_cap_lock); | |
1713 | ||
1714 | if (msg) { | |
92475f05 JL |
1715 | // Append cap_barrier field |
1716 | cap_barrier = msg->front.iov_base + msg->front.iov_len; | |
1717 | *cap_barrier = barrier; | |
1718 | msg->front.iov_len += sizeof(*cap_barrier); | |
1719 | ||
745a8e3b YZ |
1720 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); |
1721 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1722 | ceph_con_send(&session->s_con, msg); | |
e01a5946 | 1723 | } |
745a8e3b YZ |
1724 | return; |
1725 | out_err: | |
1726 | pr_err("send_cap_releases mds%d, failed to allocate message\n", | |
1727 | session->s_mds); | |
1728 | spin_lock(&session->s_cap_lock); | |
1729 | list_splice(&tmp_list, &session->s_cap_releases); | |
1730 | session->s_num_cap_releases += num_cap_releases; | |
1731 | spin_unlock(&session->s_cap_lock); | |
e01a5946 SW |
1732 | } |
1733 | ||
2f2dc053 SW |
1734 | /* |
1735 | * requests | |
1736 | */ | |
1737 | ||
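/*
 * Size the readdir reply buffer from the directory's i_files +
 * i_subdirs estimate, clamped to [1, max_readdir] entries, falling
 * back to smaller page orders if the allocation fails.  Also fills in
 * the max_entries/max_bytes readdir args sent to the MDS.
 */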
54008399 YZ |
1738 | int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, |
1739 | struct inode *dir) | |
1740 | { | |
1741 | struct ceph_inode_info *ci = ceph_inode(dir); | |
1742 | struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; | |
1743 | struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; | |
2a5beea3 | 1744 | size_t size = sizeof(struct ceph_mds_reply_dir_entry); |
54008399 YZ |
1745 | int order, num_entries; |
1746 | ||
1747 | spin_lock(&ci->i_ceph_lock); | |
1748 | num_entries = ci->i_files + ci->i_subdirs; | |
1749 | spin_unlock(&ci->i_ceph_lock); | |
1750 | num_entries = max(num_entries, 1); | |
1751 | num_entries = min(num_entries, opt->max_readdir); | |
1752 | ||
1753 | order = get_order(size * num_entries); | |
1754 | while (order >= 0) { | |
2a5beea3 YZ |
1755 | rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL | |
1756 | __GFP_NOWARN, | |
1757 | order); | |
1758 | if (rinfo->dir_entries) | |
54008399 YZ |
1759 | break; |
1760 | order--; | |
1761 | } | |
2a5beea3 | 1762 | if (!rinfo->dir_entries) |
54008399 YZ |
1763 | return -ENOMEM; |
1764 | ||
1765 | num_entries = (PAGE_SIZE << order) / size; | |
1766 | num_entries = min(num_entries, opt->max_readdir); | |
1767 | ||
1768 | rinfo->dir_buf_size = PAGE_SIZE << order; | |
1769 | req->r_num_caps = num_entries + 1; | |
1770 | req->r_args.readdir.max_entries = cpu_to_le32(num_entries); | |
1771 | req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); | |
1772 | return 0; | |
1773 | } | |
1774 | ||
2f2dc053 SW |
1775 | /* |
1776 | * Create an mds request. | |
1777 | */ | |
1778 | struct ceph_mds_request * | |
1779 | ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) | |
1780 | { | |
1781 | struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); | |
63ecae7e | 1782 | struct timespec64 ts; |
2f2dc053 SW |
1783 | |
1784 | if (!req) | |
1785 | return ERR_PTR(-ENOMEM); | |
1786 | ||
b4556396 | 1787 | mutex_init(&req->r_fill_mutex); |
37151668 | 1788 | req->r_mdsc = mdsc; |
2f2dc053 SW |
1789 | req->r_started = jiffies; |
1790 | req->r_resend_mds = -1; | |
1791 | INIT_LIST_HEAD(&req->r_unsafe_dir_item); | |
68cd5b4b | 1792 | INIT_LIST_HEAD(&req->r_unsafe_target_item); |
2f2dc053 | 1793 | req->r_fmode = -1; |
153c8e6b | 1794 | kref_init(&req->r_kref); |
fcd00b68 | 1795 | RB_CLEAR_NODE(&req->r_node); |
2f2dc053 SW |
1796 | INIT_LIST_HEAD(&req->r_wait); |
1797 | init_completion(&req->r_completion); | |
1798 | init_completion(&req->r_safe_completion); | |
1799 | INIT_LIST_HEAD(&req->r_unsafe_item); | |
1800 | ||
63ecae7e AB |
1801 | ktime_get_coarse_real_ts64(&ts); |
1802 | req->r_stamp = timespec64_to_timespec(timespec64_trunc(ts, | |
1803 | mdsc->fsc->sb->s_time_gran)); | |
b8e69066 | 1804 | |
2f2dc053 SW |
1805 | req->r_op = op; |
1806 | req->r_direct_mode = mode; | |
1807 | return req; | |
1808 | } | |
1809 | ||
1810 | /* | |
44ca18f2 | 1811 | * return the oldest (lowest tid) request in the request tree; NULL/0 if none. |
2f2dc053 SW |
1812 | * |
1813 | * called under mdsc->mutex. | |
1814 | */ | |
44ca18f2 SW |
1815 | static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) |
1816 | { | |
1817 | if (RB_EMPTY_ROOT(&mdsc->request_tree)) | |
1818 | return NULL; | |
1819 | return rb_entry(rb_first(&mdsc->request_tree), | |
1820 | struct ceph_mds_request, r_node); | |
1821 | } | |
1822 | ||
e8a7b8b1 | 1823 | static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc) |
2f2dc053 | 1824 | { |
e8a7b8b1 | 1825 | return mdsc->oldest_tid; |
2f2dc053 SW |
1826 | } |
1827 | ||
1828 | /* | |
1829 | * Build a dentry's path. Allocate on heap; caller must kfree. Based | |
1830 | * on build_path_from_dentry in fs/cifs/dir.c. | |
1831 | * | |
1832 | * If @stop_on_nosnap, generate path relative to the first non-snapped | |
1833 | * inode. | |
1834 | * | |
1835 | * Encode hidden .snap dirs as a double /, i.e. | |
1836 | * foo/.snap/bar -> foo//bar | |
1837 | */ | |
1838 | char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, | |
1839 | int stop_on_nosnap) | |
1840 | { | |
1841 | struct dentry *temp; | |
1842 | char *path; | |
1843 | int len, pos; | |
1b71fe2e | 1844 | unsigned seq; |
2f2dc053 | 1845 | |
d37b1d99 | 1846 | if (!dentry) |
2f2dc053 SW |
1847 | return ERR_PTR(-EINVAL); |
1848 | ||
1849 | retry: | |
1850 | len = 0; | |
1b71fe2e AV |
1851 | seq = read_seqbegin(&rename_lock); |
1852 | rcu_read_lock(); | |
2f2dc053 | 1853 | for (temp = dentry; !IS_ROOT(temp);) { |
2b0143b5 | 1854 | struct inode *inode = d_inode(temp); |
2f2dc053 SW |
1855 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) |
1856 | len++; /* slash only */ | |
1857 | else if (stop_on_nosnap && inode && | |
1858 | ceph_snap(inode) == CEPH_NOSNAP) | |
1859 | break; | |
1860 | else | |
1861 | len += 1 + temp->d_name.len; | |
1862 | temp = temp->d_parent; | |
2f2dc053 | 1863 | } |
1b71fe2e | 1864 | rcu_read_unlock(); |
2f2dc053 SW |
1865 | if (len) |
1866 | len--; /* no leading '/' */ | |
1867 | ||
1868 | path = kmalloc(len+1, GFP_NOFS); | |
d37b1d99 | 1869 | if (!path) |
2f2dc053 SW |
1870 | return ERR_PTR(-ENOMEM); |
1871 | pos = len; | |
1872 | path[pos] = 0; /* trailing null */ | |
1b71fe2e | 1873 | rcu_read_lock(); |
2f2dc053 | 1874 | for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { |
1b71fe2e | 1875 | struct inode *inode; |
2f2dc053 | 1876 | |
1b71fe2e | 1877 | spin_lock(&temp->d_lock); |
2b0143b5 | 1878 | inode = d_inode(temp); |
2f2dc053 | 1879 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { |
104648ad | 1880 | dout("build_path path+%d: %p SNAPDIR\n", |
2f2dc053 SW |
1881 | pos, temp); |
1882 | } else if (stop_on_nosnap && inode && | |
1883 | ceph_snap(inode) == CEPH_NOSNAP) { | |
9d5a09e6 | 1884 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1885 | break; |
1886 | } else { | |
1887 | pos -= temp->d_name.len; | |
1b71fe2e AV |
1888 | if (pos < 0) { |
1889 | spin_unlock(&temp->d_lock); | |
2f2dc053 | 1890 | break; |
1b71fe2e | 1891 | } |
2f2dc053 SW |
1892 | strncpy(path + pos, temp->d_name.name, |
1893 | temp->d_name.len); | |
2f2dc053 | 1894 | } |
1b71fe2e | 1895 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1896 | if (pos) |
1897 | path[--pos] = '/'; | |
1898 | temp = temp->d_parent; | |
2f2dc053 | 1899 | } |
1b71fe2e AV |
1900 | rcu_read_unlock(); |
1901 | if (pos != 0 || read_seqretry(&rename_lock, seq)) { | |
104648ad | 1902 | pr_err("build_path did not end path lookup where " |
2f2dc053 SW |
1903 | "expected, namelen is %d, pos is %d\n", len, pos); |
1904 | /* presumably this is only possible if racing with a | |
1905 | rename of one of the parent directories (we cannot | |
1906 | lock the dentries above us to prevent this, but | |
1907 | retrying should be harmless) */ | |
1908 | kfree(path); | |
1909 | goto retry; | |
1910 | } | |
1911 | ||
2b0143b5 | 1912 | *base = ceph_ino(d_inode(temp)); |
2f2dc053 | 1913 | *plen = len; |
104648ad | 1914 | dout("build_path on %p %d built %llx '%.*s'\n", |
84d08fa8 | 1915 | dentry, d_count(dentry), *base, len, path); |
2f2dc053 SW |
1916 | return path; |
1917 | } | |
1918 | ||
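/*
 * Encode a dentry either as a (parent ino, name) pair when the parent
 * is a plain (non-snapped) directory, or as a full path built by
 * ceph_mdsc_build_path() otherwise.  *pfreepath tells the caller
 * whether *ppath needs to be kfree'd.
 */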
fd36a717 | 1919 | static int build_dentry_path(struct dentry *dentry, struct inode *dir, |
2f2dc053 SW |
1920 | const char **ppath, int *ppathlen, u64 *pino, |
1921 | int *pfreepath) | |
1922 | { | |
1923 | char *path; | |
1924 | ||
c6b0b656 | 1925 | rcu_read_lock(); |
fd36a717 JL |
1926 | if (!dir) |
1927 | dir = d_inode_rcu(dentry->d_parent); | |
c6b0b656 JL |
1928 | if (dir && ceph_snap(dir) == CEPH_NOSNAP) { |
1929 | *pino = ceph_ino(dir); | |
1930 | rcu_read_unlock(); | |
2f2dc053 SW |
1931 | *ppath = dentry->d_name.name; |
1932 | *ppathlen = dentry->d_name.len; | |
1933 | return 0; | |
1934 | } | |
c6b0b656 | 1935 | rcu_read_unlock(); |
2f2dc053 SW |
1936 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); |
1937 | if (IS_ERR(path)) | |
1938 | return PTR_ERR(path); | |
1939 | *ppath = path; | |
1940 | *pfreepath = 1; | |
1941 | return 0; | |
1942 | } | |
1943 | ||
1944 | static int build_inode_path(struct inode *inode, | |
1945 | const char **ppath, int *ppathlen, u64 *pino, | |
1946 | int *pfreepath) | |
1947 | { | |
1948 | struct dentry *dentry; | |
1949 | char *path; | |
1950 | ||
1951 | if (ceph_snap(inode) == CEPH_NOSNAP) { | |
1952 | *pino = ceph_ino(inode); | |
1953 | *ppathlen = 0; | |
1954 | return 0; | |
1955 | } | |
1956 | dentry = d_find_alias(inode); | |
1957 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); | |
1958 | dput(dentry); | |
1959 | if (IS_ERR(path)) | |
1960 | return PTR_ERR(path); | |
1961 | *ppath = path; | |
1962 | *pfreepath = 1; | |
1963 | return 0; | |
1964 | } | |
1965 | ||
1966 | /* | |
1967 | * request arguments may be specified via an inode *, a dentry *, or | |
1968 | * an explicit ino+path. | |
1969 | */ | |
1970 | static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | |
fd36a717 JL |
1971 | struct inode *rdiri, const char *rpath, |
1972 | u64 rino, const char **ppath, int *pathlen, | |
2f2dc053 SW |
1973 | u64 *ino, int *freepath) |
1974 | { | |
1975 | int r = 0; | |
1976 | ||
1977 | if (rinode) { | |
1978 | r = build_inode_path(rinode, ppath, pathlen, ino, freepath); | |
1979 | dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), | |
1980 | ceph_snap(rinode)); | |
1981 | } else if (rdentry) { | |
fd36a717 JL |
1982 | r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, |
1983 | freepath); | |
2f2dc053 SW |
1984 | dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, |
1985 | *ppath); | |
795858db | 1986 | } else if (rpath || rino) { |
2f2dc053 SW |
1987 | *ino = rino; |
1988 | *ppath = rpath; | |
b000056a | 1989 | *pathlen = rpath ? strlen(rpath) : 0; |
2f2dc053 SW |
1990 | dout(" path %.*s\n", *pathlen, rpath); |
1991 | } | |
1992 | ||
1993 | return r; | |
1994 | } | |
1995 | ||
1996 | /* | |
1997 | * called under mdsc->mutex | |
1998 | */ | |
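/*
 * Wire layout, as encoded below: ceph_mds_request_head, two encoded
 * filepaths (ino + path string), zero or more cap/dentry releases,
 * then a ceph_timespec stamp.  r_request_release_offset remembers
 * where the releases begin so a replay can rewrite that region.
 */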
1999 | static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |
2000 | struct ceph_mds_request *req, | |
6e6f0923 | 2001 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
2002 | { |
2003 | struct ceph_msg *msg; | |
2004 | struct ceph_mds_request_head *head; | |
2005 | const char *path1 = NULL; | |
2006 | const char *path2 = NULL; | |
2007 | u64 ino1 = 0, ino2 = 0; | |
2008 | int pathlen1 = 0, pathlen2 = 0; | |
2009 | int freepath1 = 0, freepath2 = 0; | |
2010 | int len; | |
2011 | u16 releases; | |
2012 | void *p, *end; | |
2013 | int ret; | |
2014 | ||
2015 | ret = set_request_path_attr(req->r_inode, req->r_dentry, | |
3dd69aab | 2016 | req->r_parent, req->r_path1, req->r_ino1.ino, |
2f2dc053 SW |
2017 | &path1, &pathlen1, &ino1, &freepath1); |
2018 | if (ret < 0) { | |
2019 | msg = ERR_PTR(ret); | |
2020 | goto out; | |
2021 | } | |
2022 | ||
2023 | ret = set_request_path_attr(NULL, req->r_old_dentry, | |
fd36a717 | 2024 | req->r_old_dentry_dir, |
2f2dc053 SW |
2025 | req->r_path2, req->r_ino2.ino, |
2026 | &path2, &pathlen2, &ino2, &freepath2); | |
2027 | if (ret < 0) { | |
2028 | msg = ERR_PTR(ret); | |
2029 | goto out_free1; | |
2030 | } | |
2031 | ||
2032 | len = sizeof(*head) + | |
b8e69066 | 2033 | pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) + |
777d738a | 2034 | sizeof(struct ceph_timespec); |
2f2dc053 SW |
2035 | |
2036 | /* calculate (max) length for cap releases */ | |
2037 | len += sizeof(struct ceph_mds_request_release) * | |
2038 | (!!req->r_inode_drop + !!req->r_dentry_drop + | |
2039 | !!req->r_old_inode_drop + !!req->r_old_dentry_drop); | |
2040 | if (req->r_dentry_drop) | |
2041 | len += req->r_dentry->d_name.len; | |
2042 | if (req->r_old_dentry_drop) | |
2043 | len += req->r_old_dentry->d_name.len; | |
2044 | ||
b61c2763 | 2045 | msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); |
a79832f2 SW |
2046 | if (!msg) { |
2047 | msg = ERR_PTR(-ENOMEM); | |
2f2dc053 | 2048 | goto out_free2; |
a79832f2 | 2049 | } |
2f2dc053 | 2050 | |
7cfa0313 | 2051 | msg->hdr.version = cpu_to_le16(2); |
6df058c0 SW |
2052 | msg->hdr.tid = cpu_to_le64(req->r_tid); |
2053 | ||
2f2dc053 SW |
2054 | head = msg->front.iov_base; |
2055 | p = msg->front.iov_base + sizeof(*head); | |
2056 | end = msg->front.iov_base + msg->front.iov_len; | |
2057 | ||
2058 | head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); | |
2059 | head->op = cpu_to_le32(req->r_op); | |
ff3d0046 EB |
2060 | head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); |
2061 | head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); | |
2f2dc053 SW |
2062 | head->args = req->r_args; |
2063 | ||
2064 | ceph_encode_filepath(&p, end, ino1, path1); | |
2065 | ceph_encode_filepath(&p, end, ino2, path2); | |
2066 | ||
e979cf50 SW |
2067 | /* make note of release offset, in case we need to replay */ |
2068 | req->r_request_release_offset = p - msg->front.iov_base; | |
2069 | ||
2f2dc053 SW |
2070 | /* cap releases */ |
2071 | releases = 0; | |
2072 | if (req->r_inode_drop) | |
2073 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 2074 | req->r_inode ? req->r_inode : d_inode(req->r_dentry), |
2f2dc053 SW |
2075 | mds, req->r_inode_drop, req->r_inode_unless, 0); |
2076 | if (req->r_dentry_drop) | |
2077 | releases += ceph_encode_dentry_release(&p, req->r_dentry, | |
3dd69aab | 2078 | req->r_parent, mds, req->r_dentry_drop, |
ca6c8ae0 | 2079 | req->r_dentry_unless); |
2f2dc053 SW |
2080 | if (req->r_old_dentry_drop) |
2081 | releases += ceph_encode_dentry_release(&p, req->r_old_dentry, | |
ca6c8ae0 JL |
2082 | req->r_old_dentry_dir, mds, |
2083 | req->r_old_dentry_drop, | |
2084 | req->r_old_dentry_unless); | |
2f2dc053 SW |
2085 | if (req->r_old_inode_drop) |
2086 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 2087 | d_inode(req->r_old_dentry), |
2f2dc053 | 2088 | mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); |
6e6f0923 YZ |
2089 | |
2090 | if (drop_cap_releases) { | |
2091 | releases = 0; | |
2092 | p = msg->front.iov_base + req->r_request_release_offset; | |
2093 | } | |
2094 | ||
2f2dc053 SW |
2095 | head->num_releases = cpu_to_le16(releases); |
2096 | ||
b8e69066 | 2097 | /* time stamp */ |
1f041a89 YZ |
2098 | { |
2099 | struct ceph_timespec ts; | |
2100 | ceph_encode_timespec(&ts, &req->r_stamp); | |
2101 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
2102 | } | |
b8e69066 | 2103 | |
2f2dc053 SW |
2104 | BUG_ON(p > end); |
2105 | msg->front.iov_len = p - msg->front.iov_base; | |
2106 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
2107 | ||
25e6bae3 YZ |
2108 | if (req->r_pagelist) { |
2109 | struct ceph_pagelist *pagelist = req->r_pagelist; | |
0e1a5ee6 | 2110 | refcount_inc(&pagelist->refcnt); |
25e6bae3 YZ |
2111 | ceph_msg_data_add_pagelist(msg, pagelist); |
2112 | msg->hdr.data_len = cpu_to_le32(pagelist->length); | |
2113 | } else { | |
2114 | msg->hdr.data_len = 0; | |
ebf18f47 | 2115 | } |
02afca6c | 2116 | |
2f2dc053 SW |
2117 | msg->hdr.data_off = cpu_to_le16(0); |
2118 | ||
2119 | out_free2: | |
2120 | if (freepath2) | |
2121 | kfree((char *)path2); | |
2122 | out_free1: | |
2123 | if (freepath1) | |
2124 | kfree((char *)path1); | |
2125 | out: | |
2126 | return msg; | |
2127 | } | |
2128 | ||
2129 | /* | |
2130 | * called under mdsc->mutex if error, under no mutex if | |
2131 | * success. | |
2132 | */ | |
2133 | static void complete_request(struct ceph_mds_client *mdsc, | |
2134 | struct ceph_mds_request *req) | |
2135 | { | |
2136 | if (req->r_callback) | |
2137 | req->r_callback(mdsc, req); | |
2138 | else | |
03066f23 | 2139 | complete_all(&req->r_completion); |
2f2dc053 SW |
2140 | } |
2141 | ||
2142 | /* | |
2143 | * called under mdsc->mutex | |
2144 | */ | |
2145 | static int __prepare_send_request(struct ceph_mds_client *mdsc, | |
2146 | struct ceph_mds_request *req, | |
6e6f0923 | 2147 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
2148 | { |
2149 | struct ceph_mds_request_head *rhead; | |
2150 | struct ceph_msg *msg; | |
2151 | int flags = 0; | |
2152 | ||
2f2dc053 | 2153 | req->r_attempts++; |
e55b71f8 GF |
2154 | if (req->r_inode) { |
2155 | struct ceph_cap *cap = | |
2156 | ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); | |
2157 | ||
2158 | if (cap) | |
2159 | req->r_sent_on_mseq = cap->mseq; | |
2160 | else | |
2161 | req->r_sent_on_mseq = -1; | |
2162 | } | |
2f2dc053 SW |
2163 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, |
2164 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); | |
2165 | ||
bc2de10d | 2166 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { |
c5c9a0bf | 2167 | void *p; |
01a92f17 SW |
2168 | /* |
2169 | * Replay. Do not regenerate message (and rebuild | |
2170 | * paths, etc.); just use the original message. | |
2171 | * Rebuilding paths will break for renames because | |
2172 | * d_move mangles the src name. | |
2173 | */ | |
2174 | msg = req->r_request; | |
2175 | rhead = msg->front.iov_base; | |
2176 | ||
2177 | flags = le32_to_cpu(rhead->flags); | |
2178 | flags |= CEPH_MDS_FLAG_REPLAY; | |
2179 | rhead->flags = cpu_to_le32(flags); | |
2180 | ||
2181 | if (req->r_target_inode) | |
2182 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | |
2183 | ||
2184 | rhead->num_retry = req->r_attempts - 1; | |
e979cf50 SW |
2185 | |
2186 | /* remove cap/dentry releases from message */ | |
2187 | rhead->num_releases = 0; | |
c5c9a0bf YZ |
2188 | |
2189 | /* time stamp */ | |
2190 | p = msg->front.iov_base + req->r_request_release_offset; | |
1f041a89 YZ |
2191 | { |
2192 | struct ceph_timespec ts; | |
2193 | ceph_encode_timespec(&ts, &req->r_stamp); | |
2194 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
2195 | } | |
c5c9a0bf YZ |
2196 | |
2197 | msg->front.iov_len = p - msg->front.iov_base; | |
2198 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
01a92f17 SW |
2199 | return 0; |
2200 | } | |
2201 | ||
2f2dc053 SW |
2202 | if (req->r_request) { |
2203 | ceph_msg_put(req->r_request); | |
2204 | req->r_request = NULL; | |
2205 | } | |
6e6f0923 | 2206 | msg = create_request_message(mdsc, req, mds, drop_cap_releases); |
2f2dc053 | 2207 | if (IS_ERR(msg)) { |
e1518c7c | 2208 | req->r_err = PTR_ERR(msg); |
a79832f2 | 2209 | return PTR_ERR(msg); |
2f2dc053 SW |
2210 | } |
2211 | req->r_request = msg; | |
2212 | ||
2213 | rhead = msg->front.iov_base; | |
2f2dc053 | 2214 | rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); |
bc2de10d | 2215 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
2f2dc053 | 2216 | flags |= CEPH_MDS_FLAG_REPLAY; |
3dd69aab | 2217 | if (req->r_parent) |
2f2dc053 SW |
2218 | flags |= CEPH_MDS_FLAG_WANT_DENTRY; |
2219 | rhead->flags = cpu_to_le32(flags); | |
2220 | rhead->num_fwd = req->r_num_fwd; | |
2221 | rhead->num_retry = req->r_attempts - 1; | |
01a92f17 | 2222 | rhead->ino = 0; |
2f2dc053 | 2223 | |
3dd69aab | 2224 | dout(" r_parent = %p\n", req->r_parent); |
2f2dc053 SW |
2225 | return 0; |
2226 | } | |
2227 | ||
2228 | /* | |
2229 | * send request, or put it on the appropriate wait list. | |
2230 | */ | |
2231 | static int __do_request(struct ceph_mds_client *mdsc, | |
2232 | struct ceph_mds_request *req) | |
2233 | { | |
2234 | struct ceph_mds_session *session = NULL; | |
2235 | int mds = -1; | |
48fec5d0 | 2236 | int err = 0; |
2f2dc053 | 2237 | |
bc2de10d JL |
2238 | if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { |
2239 | if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) | |
eb1b8af3 | 2240 | __unregister_request(mdsc, req); |
2f2dc053 | 2241 | goto out; |
eb1b8af3 | 2242 | } |
2f2dc053 SW |
2243 | |
2244 | if (req->r_timeout && | |
2245 | time_after_eq(jiffies, req->r_started + req->r_timeout)) { | |
2246 | dout("do_request timed out\n"); | |
2247 | err = -EIO; | |
2248 | goto finish; | |
2249 | } | |
52953d55 | 2250 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { |
48fec5d0 YZ |
2251 | dout("do_request forced umount\n"); |
2252 | err = -EIO; | |
2253 | goto finish; | |
2254 | } | |
52953d55 | 2255 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { |
e9e427f0 YZ |
2256 | if (mdsc->mdsmap_err) { |
2257 | err = mdsc->mdsmap_err; | |
2258 | dout("do_request mdsmap err %d\n", err); | |
2259 | goto finish; | |
2260 | } | |
cc8e8342 YZ |
2261 | if (mdsc->mdsmap->m_epoch == 0) { |
2262 | dout("do_request no mdsmap, waiting for map\n"); | |
2263 | list_add(&req->r_wait, &mdsc->waiting_for_map); | |
2264 | goto finish; | |
2265 | } | |
e9e427f0 YZ |
2266 | if (!(mdsc->fsc->mount_options->flags & |
2267 | CEPH_MOUNT_OPT_MOUNTWAIT) && | |
2268 | !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { | |
2269 | err = -ENOENT; | |
2270 | pr_info("probably no mds server is up\n"); | |
2271 | goto finish; | |
2272 | } | |
2273 | } | |
2f2dc053 | 2274 | |
dc69e2e9 SW |
2275 | put_request_session(req); |
2276 | ||
2f2dc053 SW |
2277 | mds = __choose_mds(mdsc, req); |
2278 | if (mds < 0 || | |
2279 | ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { | |
2280 | dout("do_request no mds or not active, waiting for map\n"); | |
2281 | list_add(&req->r_wait, &mdsc->waiting_for_map); | |
2282 | goto out; | |
2283 | } | |
2284 | ||
2285 | /* get, open session */ | |
2286 | session = __ceph_lookup_mds_session(mdsc, mds); | |
9c423956 | 2287 | if (!session) { |
2f2dc053 | 2288 | session = register_session(mdsc, mds); |
9c423956 SW |
2289 | if (IS_ERR(session)) { |
2290 | err = PTR_ERR(session); | |
2291 | goto finish; | |
2292 | } | |
2293 | } | |
dc69e2e9 SW |
2294 | req->r_session = get_session(session); |
2295 | ||
2f2dc053 | 2296 | dout("do_request mds%d session %p state %s\n", mds, session, |
a687ecaf | 2297 | ceph_session_state_name(session->s_state)); |
2f2dc053 SW |
2298 | if (session->s_state != CEPH_MDS_SESSION_OPEN && |
2299 | session->s_state != CEPH_MDS_SESSION_HUNG) { | |
fcff415c YZ |
2300 | if (session->s_state == CEPH_MDS_SESSION_REJECTED) { |
2301 | err = -EACCES; | |
2302 | goto out_session; | |
2303 | } | |
2f2dc053 SW |
2304 | if (session->s_state == CEPH_MDS_SESSION_NEW || |
2305 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
2306 | __open_session(mdsc, session); | |
2307 | list_add(&req->r_wait, &session->s_waiting); | |
2308 | goto out_session; | |
2309 | } | |
2310 | ||
2311 | /* send request */ | |
2f2dc053 SW |
2312 | req->r_resend_mds = -1; /* forget any previous mds hint */ |
2313 | ||
2314 | if (req->r_request_started == 0) /* note request start time */ | |
2315 | req->r_request_started = jiffies; | |
2316 | ||
6e6f0923 | 2317 | err = __prepare_send_request(mdsc, req, mds, false); |
2f2dc053 SW |
2318 | if (!err) { |
2319 | ceph_msg_get(req->r_request); | |
2320 | ceph_con_send(&session->s_con, req->r_request); | |
2321 | } | |
2322 | ||
2323 | out_session: | |
2324 | ceph_put_mds_session(session); | |
48fec5d0 YZ |
2325 | finish: |
2326 | if (err) { | |
2327 | dout("__do_request early error %d\n", err); | |
2328 | req->r_err = err; | |
2329 | complete_request(mdsc, req); | |
2330 | __unregister_request(mdsc, req); | |
2331 | } | |
2f2dc053 SW |
2332 | out: |
2333 | return err; | |
2f2dc053 SW |
2334 | } |
2335 | ||
2336 | /* | |
2337 | * called under mdsc->mutex | |
2338 | */ | |
2339 | static void __wake_requests(struct ceph_mds_client *mdsc, | |
2340 | struct list_head *head) | |
2341 | { | |
ed75ec2c YZ |
2342 | struct ceph_mds_request *req; |
2343 | LIST_HEAD(tmp_list); | |
2344 | ||
2345 | list_splice_init(head, &tmp_list); | |
2f2dc053 | 2346 | |
ed75ec2c YZ |
2347 | while (!list_empty(&tmp_list)) { |
2348 | req = list_entry(tmp_list.next, | |
2349 | struct ceph_mds_request, r_wait); | |
2f2dc053 | 2350 | list_del_init(&req->r_wait); |
7971bd92 | 2351 | dout(" wake request %p tid %llu\n", req, req->r_tid); |
2f2dc053 SW |
2352 | __do_request(mdsc, req); |
2353 | } | |
2354 | } | |
2355 | ||
2356 | /* | |
2357 | * Wake up threads with requests pending for @mds, so that they can | |
29790f26 | 2358 | * resubmit their requests to a possibly different mds. |
2f2dc053 | 2359 | */ |
29790f26 | 2360 | static void kick_requests(struct ceph_mds_client *mdsc, int mds) |
2f2dc053 | 2361 | { |
44ca18f2 | 2362 | struct ceph_mds_request *req; |
282c1052 | 2363 | struct rb_node *p = rb_first(&mdsc->request_tree); |
2f2dc053 SW |
2364 | |
2365 | dout("kick_requests mds%d\n", mds); | |
282c1052 | 2366 | while (p) { |
44ca18f2 | 2367 | req = rb_entry(p, struct ceph_mds_request, r_node); |
282c1052 | 2368 | p = rb_next(p); |
bc2de10d | 2369 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
44ca18f2 | 2370 | continue; |
3de22be6 YZ |
2371 | if (req->r_attempts > 0) |
2372 | continue; /* only new requests */ | |
44ca18f2 SW |
2373 | if (req->r_session && |
2374 | req->r_session->s_mds == mds) { | |
2375 | dout(" kicking tid %llu\n", req->r_tid); | |
03974e81 | 2376 | list_del_init(&req->r_wait); |
44ca18f2 | 2377 | __do_request(mdsc, req); |
2f2dc053 SW |
2378 | } |
2379 | } | |
2380 | } | |
2381 | ||
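/*
 * Fire off a request without waiting for the reply; callers that care
 * about completion are expected to wait on r_completion themselves.
 */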
2382 | void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, | |
2383 | struct ceph_mds_request *req) | |
2384 | { | |
2385 | dout("submit_request on %p\n", req); | |
2386 | mutex_lock(&mdsc->mutex); | |
2387 | __register_request(mdsc, req, NULL); | |
2388 | __do_request(mdsc, req); | |
2389 | mutex_unlock(&mdsc->mutex); | |
2390 | } | |
2391 | ||
2392 | /* | |
2393 | * Synchronously perform an MDS request, taking care of all of the | |
2394 | * session setup, forwarding, and retry details. | |
2395 | */ | |
2396 | int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, | |
2397 | struct inode *dir, | |
2398 | struct ceph_mds_request *req) | |
2399 | { | |
2400 | int err; | |
2401 | ||
2402 | dout("do_request on %p\n", req); | |
2403 | ||
3dd69aab | 2404 | /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */ |
2f2dc053 SW |
2405 | if (req->r_inode) |
2406 | ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); | |
3dd69aab JL |
2407 | if (req->r_parent) |
2408 | ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); | |
844d87c3 | 2409 | if (req->r_old_dentry_dir) |
41b02e1f SW |
2410 | ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), |
2411 | CEPH_CAP_PIN); | |
2f2dc053 SW |
2412 | |
2413 | /* issue */ | |
2414 | mutex_lock(&mdsc->mutex); | |
2415 | __register_request(mdsc, req, dir); | |
2416 | __do_request(mdsc, req); | |
2417 | ||
e1518c7c SW |
2418 | if (req->r_err) { |
2419 | err = req->r_err; | |
e1518c7c | 2420 | goto out; |
2f2dc053 SW |
2421 | } |
2422 | ||
e1518c7c SW |
2423 | /* wait */ |
2424 | mutex_unlock(&mdsc->mutex); | |
2425 | dout("do_request waiting\n"); | |
5be73034 | 2426 | if (!req->r_timeout && req->r_wait_for_completion) { |
9280be24 | 2427 | err = req->r_wait_for_completion(mdsc, req); |
e1518c7c | 2428 | } else { |
5be73034 ID |
2429 | long timeleft = wait_for_completion_killable_timeout( |
2430 | &req->r_completion, | |
2431 | ceph_timeout_jiffies(req->r_timeout)); | |
2432 | if (timeleft > 0) | |
2433 | err = 0; | |
2434 | else if (!timeleft) | |
2435 | err = -EIO; /* timed out */ | |
2436 | else | |
2437 | err = timeleft; /* killed */ | |
e1518c7c SW |
2438 | } |
2439 | dout("do_request waited, got %d\n", err); | |
2440 | mutex_lock(&mdsc->mutex); | |
5b1daecd | 2441 | |
e1518c7c | 2442 | /* only abort if we didn't race with a real reply */ |
bc2de10d | 2443 | if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { |
e1518c7c SW |
2444 | err = le32_to_cpu(req->r_reply_info.head->result); |
2445 | } else if (err < 0) { | |
2446 | dout("aborted request %lld with %d\n", req->r_tid, err); | |
b4556396 SW |
2447 | |
2448 | /* | |
2449 | * ensure we aren't running concurrently with | |
2450 | * ceph_fill_trace or ceph_readdir_prepopulate, which | |
2451 | * rely on locks (dir mutex) held by our caller. | |
2452 | */ | |
2453 | mutex_lock(&req->r_fill_mutex); | |
e1518c7c | 2454 | req->r_err = err; |
bc2de10d | 2455 | set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); |
b4556396 | 2456 | mutex_unlock(&req->r_fill_mutex); |
5b1daecd | 2457 | |
3dd69aab | 2458 | if (req->r_parent && |
167c9e35 SW |
2459 | (req->r_op & CEPH_MDS_OP_WRITE)) |
2460 | ceph_invalidate_dir_request(req); | |
2f2dc053 | 2461 | } else { |
e1518c7c | 2462 | err = req->r_err; |
2f2dc053 | 2463 | } |
2f2dc053 | 2464 | |
e1518c7c SW |
2465 | out: |
2466 | mutex_unlock(&mdsc->mutex); | |
2f2dc053 SW |
2467 | dout("do_request %p done, result %d\n", req, err); |
2468 | return err; | |
2469 | } | |
2470 | ||
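/*
 * Illustrative caller sketch (not code from this file; the exact
 * fields set on the request vary per operation -- see the callers in
 * dir.c and file.c):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);
 */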
167c9e35 | 2471 | /* |
2f276c51 | 2472 | * Invalidate dir's completeness, dentry lease state on an aborted MDS |
167c9e35 SW |
2473 | * namespace request. |
2474 | */ | |
2475 | void ceph_invalidate_dir_request(struct ceph_mds_request *req) | |
2476 | { | |
8d8f371c YZ |
2477 | struct inode *dir = req->r_parent; |
2478 | struct inode *old_dir = req->r_old_dentry_dir; | |
167c9e35 | 2479 | |
8d8f371c | 2480 | dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir); |
167c9e35 | 2481 | |
8d8f371c YZ |
2482 | ceph_dir_clear_complete(dir); |
2483 | if (old_dir) | |
2484 | ceph_dir_clear_complete(old_dir); | |
167c9e35 SW |
2485 | if (req->r_dentry) |
2486 | ceph_invalidate_dentry_lease(req->r_dentry); | |
2487 | if (req->r_old_dentry) | |
2488 | ceph_invalidate_dentry_lease(req->r_old_dentry); | |
2489 | } | |
2490 | ||
2f2dc053 SW |
2491 | /* |
2492 | * Handle mds reply. | |
2493 | * | |
2494 | * We take the session mutex and parse and process the reply immediately. | |
2495 | * This preserves the logical ordering of replies, capabilities, etc., sent | |
2496 | * by the MDS as they are applied to our local cache. | |
2497 | */ | |
2498 | static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |
2499 | { | |
2500 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2501 | struct ceph_mds_request *req; | |
2502 | struct ceph_mds_reply_head *head = msg->front.iov_base; | |
2503 | struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ | |
982d6011 | 2504 | struct ceph_snap_realm *realm; |
2f2dc053 SW |
2505 | u64 tid; |
2506 | int err, result; | |
2600d2dd | 2507 | int mds = session->s_mds; |
2f2dc053 | 2508 | |
2f2dc053 SW |
2509 | if (msg->front.iov_len < sizeof(*head)) { |
2510 | pr_err("mdsc_handle_reply got corrupt (short) reply\n"); | |
9ec7cab1 | 2511 | ceph_msg_dump(msg); |
2f2dc053 SW |
2512 | return; |
2513 | } | |
2514 | ||
2515 | /* get request, session */ | |
6df058c0 | 2516 | tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 | 2517 | mutex_lock(&mdsc->mutex); |
fcd00b68 | 2518 | req = lookup_get_request(mdsc, tid); |
2f2dc053 SW |
2519 | if (!req) { |
2520 | dout("handle_reply on unknown tid %llu\n", tid); | |
2521 | mutex_unlock(&mdsc->mutex); | |
2522 | return; | |
2523 | } | |
2524 | dout("handle_reply %p\n", req); | |
2f2dc053 SW |
2525 | |
2526 | /* correct session? */ | |
d96d6049 | 2527 | if (req->r_session != session) { |
2f2dc053 SW |
2528 | pr_err("mdsc_handle_reply got %llu on session mds%d" |
2529 | " not mds%d\n", tid, session->s_mds, | |
2530 | req->r_session ? req->r_session->s_mds : -1); | |
2531 | mutex_unlock(&mdsc->mutex); | |
2532 | goto out; | |
2533 | } | |
2534 | ||
2535 | /* dup? */ | |
bc2de10d JL |
2536 | if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || |
2537 | (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { | |
f3ae1b97 | 2538 | pr_warn("got a dup %s reply on %llu from mds%d\n", |
2f2dc053 SW |
2539 | head->safe ? "safe" : "unsafe", tid, mds); |
2540 | mutex_unlock(&mdsc->mutex); | |
2541 | goto out; | |
2542 | } | |
bc2de10d | 2543 | if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) { |
f3ae1b97 | 2544 | pr_warn("got unsafe after safe on %llu from mds%d\n", |
85792d0d SW |
2545 | tid, mds); |
2546 | mutex_unlock(&mdsc->mutex); | |
2547 | goto out; | |
2548 | } | |
2f2dc053 SW |
2549 | |
2550 | result = le32_to_cpu(head->result); | |
2551 | ||
2552 | /* | |
e55b71f8 GF |
2553 | * Handle an ESTALE: | |
2554 | * if we're not talking to the authority, send to them; | |
2555 | * if the authority has changed while we weren't looking, | |
2556 | * send to the new authority. | |
2557 | * Otherwise we just have to return an ESTALE. | |
2f2dc053 SW |
2558 | */ |
2559 | if (result == -ESTALE) { | |
4c069a58 | 2560 | dout("got ESTALE on request %llu\n", req->r_tid); |
51da8e8c | 2561 | req->r_resend_mds = -1; |
ca18bede | 2562 | if (req->r_direct_mode != USE_AUTH_MDS) { |
4c069a58 | 2563 | dout("not using auth, setting for that now\n"); |
e55b71f8 | 2564 | req->r_direct_mode = USE_AUTH_MDS; |
2f2dc053 SW |
2565 | __do_request(mdsc, req); |
2566 | mutex_unlock(&mdsc->mutex); | |
2567 | goto out; | |
e55b71f8 | 2568 | } else { |
ca18bede YZ |
2569 | int mds = __choose_mds(mdsc, req); |
2570 | if (mds >= 0 && mds != req->r_session->s_mds) { | |
4c069a58 | 2571 | dout("but auth changed, so resending\n"); |
e55b71f8 GF |
2572 | __do_request(mdsc, req); |
2573 | mutex_unlock(&mdsc->mutex); | |
2574 | goto out; | |
2575 | } | |
2f2dc053 | 2576 | } |
4c069a58 | 2577 | dout("have to return ESTALE on request %llu\n", req->r_tid); |
2f2dc053 SW |
2578 | } |
2579 | ||
e55b71f8 | 2580 | |
2f2dc053 | 2581 | if (head->safe) { |
bc2de10d | 2582 | set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags); |
2f2dc053 | 2583 | __unregister_request(mdsc, req); |
2f2dc053 | 2584 | |
bc2de10d | 2585 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { |
2f2dc053 SW |
2586 | /* |
2587 | * We already handled the unsafe response, now do the | |
2588 | * cleanup. No need to examine the response; the MDS | |
2589 | * doesn't include any result info in the safe | |
2590 | * response. And even if it did, there is nothing | |
2591 | * useful we could do with a revised return value. | |
2592 | */ | |
2593 | dout("got safe reply %llu, mds%d\n", tid, mds); | |
2f2dc053 SW |
2594 | |
2595 | /* last unsafe request during umount? */ | |
44ca18f2 | 2596 | if (mdsc->stopping && !__get_oldest_req(mdsc)) |
03066f23 | 2597 | complete_all(&mdsc->safe_umount_waiters); |
2f2dc053 SW |
2598 | mutex_unlock(&mdsc->mutex); |
2599 | goto out; | |
2600 | } | |
e1518c7c | 2601 | } else { |
bc2de10d | 2602 | set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags); |
2f2dc053 | 2603 | list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); |
4c06ace8 YZ |
2604 | if (req->r_unsafe_dir) { |
2605 | struct ceph_inode_info *ci = | |
2606 | ceph_inode(req->r_unsafe_dir); | |
2607 | spin_lock(&ci->i_unsafe_lock); | |
2608 | list_add_tail(&req->r_unsafe_dir_item, | |
2609 | &ci->i_unsafe_dirops); | |
2610 | spin_unlock(&ci->i_unsafe_lock); | |
2611 | } | |
2f2dc053 SW |
2612 | } |
2613 | ||
2614 | dout("handle_reply tid %lld result %d\n", tid, result); | |
2615 | rinfo = &req->r_reply_info; | |
14303d20 | 2616 | err = parse_reply_info(msg, rinfo, session->s_con.peer_features); |
2f2dc053 SW |
2617 | mutex_unlock(&mdsc->mutex); |
2618 | ||
2619 | mutex_lock(&session->s_mutex); | |
2620 | if (err < 0) { | |
25933abd | 2621 | pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); |
9ec7cab1 | 2622 | ceph_msg_dump(msg); |
2f2dc053 SW |
2623 | goto out_err; |
2624 | } | |
2625 | ||
2626 | /* snap trace */ | |
982d6011 | 2627 | realm = NULL; |
2f2dc053 SW |
2628 | if (rinfo->snapblob_len) { |
2629 | down_write(&mdsc->snap_rwsem); | |
2630 | ceph_update_snap_trace(mdsc, rinfo->snapblob, | |
982d6011 YZ |
2631 | rinfo->snapblob + rinfo->snapblob_len, |
2632 | le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, | |
2633 | &realm); | |
2f2dc053 SW |
2634 | downgrade_write(&mdsc->snap_rwsem); |
2635 | } else { | |
2636 | down_read(&mdsc->snap_rwsem); | |
2637 | } | |
2638 | ||
2639 | /* insert trace into our cache */ | |
b4556396 | 2640 | mutex_lock(&req->r_fill_mutex); |
315f2408 | 2641 | current->journal_info = req; |
f5a03b08 | 2642 | err = ceph_fill_trace(mdsc->fsc->sb, req); |
2f2dc053 | 2643 | if (err == 0) { |
6e8575fa | 2644 | if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || |
81c6aea5 | 2645 | req->r_op == CEPH_MDS_OP_LSSNAP)) |
2f2dc053 | 2646 | ceph_readdir_prepopulate(req, req->r_session); |
37151668 | 2647 | ceph_unreserve_caps(mdsc, &req->r_caps_reservation); |
2f2dc053 | 2648 | } |
315f2408 | 2649 | current->journal_info = NULL; |
b4556396 | 2650 | mutex_unlock(&req->r_fill_mutex); |
2f2dc053 SW |
2651 | |
2652 | up_read(&mdsc->snap_rwsem); | |
982d6011 YZ |
2653 | if (realm) |
2654 | ceph_put_snap_realm(mdsc, realm); | |
68cd5b4b | 2655 | |
bc2de10d JL |
2656 | if (err == 0 && req->r_target_inode && |
2657 | test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { | |
68cd5b4b YZ |
2658 | struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); |
2659 | spin_lock(&ci->i_unsafe_lock); | |
2660 | list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops); | |
2661 | spin_unlock(&ci->i_unsafe_lock); | |
2662 | } | |
2f2dc053 | 2663 | out_err: |
e1518c7c | 2664 | mutex_lock(&mdsc->mutex); |
bc2de10d | 2665 | if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { |
e1518c7c SW |
2666 | if (err) { |
2667 | req->r_err = err; | |
2668 | } else { | |
5fdb1389 | 2669 | req->r_reply = ceph_msg_get(msg); |
bc2de10d | 2670 | set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags); |
e1518c7c | 2671 | } |
2f2dc053 | 2672 | } else { |
e1518c7c | 2673 | dout("reply arrived after request %lld was aborted\n", tid); |
2f2dc053 | 2674 | } |
e1518c7c | 2675 | mutex_unlock(&mdsc->mutex); |
2f2dc053 | 2676 | |
2f2dc053 SW |
2677 | mutex_unlock(&session->s_mutex); |
2678 | ||
2679 | /* kick calling process */ | |
2680 | complete_request(mdsc, req); | |
2681 | out: | |
2682 | ceph_mdsc_put_request(req); | |
2683 | return; | |
2684 | } | |
2685 | ||
2686 | ||
2687 | ||
2688 | /* | |
2689 | * handle an MDS notification that our request has been forwarded. | |
2690 | */ | |
2600d2dd SW |
2691 | static void handle_forward(struct ceph_mds_client *mdsc, |
2692 | struct ceph_mds_session *session, | |
2693 | struct ceph_msg *msg) | |
2f2dc053 SW |
2694 | { |
2695 | struct ceph_mds_request *req; | |
a1ea787c | 2696 | u64 tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 SW |
2697 | u32 next_mds; |
2698 | u32 fwd_seq; | |
2f2dc053 SW |
2699 | int err = -EINVAL; |
2700 | void *p = msg->front.iov_base; | |
2701 | void *end = p + msg->front.iov_len; | |
2f2dc053 | 2702 | |
a1ea787c | 2703 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); |
c89136ea SW |
2704 | next_mds = ceph_decode_32(&p); |
2705 | fwd_seq = ceph_decode_32(&p); | |
2f2dc053 SW |
2706 | |
2707 | mutex_lock(&mdsc->mutex); | |
fcd00b68 | 2708 | req = lookup_get_request(mdsc, tid); |
2f2dc053 | 2709 | if (!req) { |
2a8e5e36 | 2710 | dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); |
2f2dc053 SW |
2711 | goto out; /* dup reply? */ |
2712 | } | |
2713 | ||
bc2de10d | 2714 | if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { |
2a8e5e36 SW |
2715 | dout("forward tid %llu aborted, unregistering\n", tid); |
2716 | __unregister_request(mdsc, req); | |
2717 | } else if (fwd_seq <= req->r_num_fwd) { | |
2718 | dout("forward tid %llu to mds%d - old seq %d <= %d\n", | |
2f2dc053 SW |
2719 | tid, next_mds, req->r_num_fwd, fwd_seq); |
2720 | } else { | |
2721 | /* resend. forward race not possible; mds would drop */ | |
2a8e5e36 SW |
2722 | dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); |
2723 | BUG_ON(req->r_err); | |
bc2de10d | 2724 | BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)); |
3de22be6 | 2725 | req->r_attempts = 0; |
2f2dc053 SW |
2726 | req->r_num_fwd = fwd_seq; |
2727 | req->r_resend_mds = next_mds; | |
2728 | put_request_session(req); | |
2729 | __do_request(mdsc, req); | |
2730 | } | |
2731 | ceph_mdsc_put_request(req); | |
2732 | out: | |
2733 | mutex_unlock(&mdsc->mutex); | |
2734 | return; | |
2735 | ||
2736 | bad: | |
2737 | pr_err("mdsc_handle_forward decode error err=%d\n", err); | |
2738 | } | |
2739 | ||
2740 | /* | |
2741 | * handle an MDS session control message | |
2742 | */ | |
2743 | static void handle_session(struct ceph_mds_session *session, | |
2744 | struct ceph_msg *msg) | |
2745 | { | |
2746 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2747 | u32 op; | |
2748 | u64 seq; | |
2600d2dd | 2749 | int mds = session->s_mds; |
2f2dc053 SW |
2750 | struct ceph_mds_session_head *h = msg->front.iov_base; |
2751 | int wake = 0; | |
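/* wake == 1: wake session waiters; wake == 2: also kick new requests */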
2752 | ||
2f2dc053 SW |
2753 | /* decode */ |
2754 | if (msg->front.iov_len != sizeof(*h)) | |
2755 | goto bad; | |
2756 | op = le32_to_cpu(h->op); | |
2757 | seq = le64_to_cpu(h->seq); | |
2758 | ||
2759 | mutex_lock(&mdsc->mutex); | |
0a07fc8c YZ |
2760 | if (op == CEPH_SESSION_CLOSE) { |
2761 | get_session(session); | |
2600d2dd | 2762 | __unregister_session(mdsc, session); |
0a07fc8c | 2763 | } |
2f2dc053 SW |
2764 | /* FIXME: this ttl calculation is generous */ |
2765 | session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; | |
2766 | mutex_unlock(&mdsc->mutex); | |
2767 | ||
2768 | mutex_lock(&session->s_mutex); | |
2769 | ||
2770 | dout("handle_session mds%d %s %p state %s seq %llu\n", | |
2771 | mds, ceph_session_op_name(op), session, | |
a687ecaf | 2772 | ceph_session_state_name(session->s_state), seq); |
2f2dc053 SW |
2773 | |
2774 | if (session->s_state == CEPH_MDS_SESSION_HUNG) { | |
2775 | session->s_state = CEPH_MDS_SESSION_OPEN; | |
2776 | pr_info("mds%d came back\n", session->s_mds); | |
2777 | } | |
2778 | ||
2779 | switch (op) { | |
2780 | case CEPH_SESSION_OPEN: | |
29790f26 SW |
2781 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2782 | pr_info("mds%d reconnect success\n", session->s_mds); | |
2f2dc053 SW |
2783 | session->s_state = CEPH_MDS_SESSION_OPEN; |
2784 | renewed_caps(mdsc, session, 0); | |
2785 | wake = 1; | |
2786 | if (mdsc->stopping) | |
2787 | __close_session(mdsc, session); | |
2788 | break; | |
2789 | ||
2790 | case CEPH_SESSION_RENEWCAPS: | |
2791 | if (session->s_renew_seq == seq) | |
2792 | renewed_caps(mdsc, session, 1); | |
2793 | break; | |
2794 | ||
2795 | case CEPH_SESSION_CLOSE: | |
29790f26 SW |
2796 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2797 | pr_info("mds%d reconnect denied\n", session->s_mds); | |
1c841a96 | 2798 | cleanup_session_requests(mdsc, session); |
2f2dc053 | 2799 | remove_session_caps(session); |
656e4382 | 2800 | wake = 2; /* for good measure */ |
f3c60c59 | 2801 | wake_up_all(&mdsc->session_close_wq); |
2f2dc053 SW |
2802 | break; |
2803 | ||
2804 | case CEPH_SESSION_STALE: | |
2805 | pr_info("mds%d caps went stale, renewing\n", | |
2806 | session->s_mds); | |
d8fb02ab | 2807 | spin_lock(&session->s_gen_ttl_lock); |
2f2dc053 | 2808 | session->s_cap_gen++; |
1ce208a6 | 2809 | session->s_cap_ttl = jiffies - 1; |
d8fb02ab | 2810 | spin_unlock(&session->s_gen_ttl_lock); |
2f2dc053 SW |
2811 | send_renew_caps(mdsc, session); |
2812 | break; | |
2813 | ||
2814 | case CEPH_SESSION_RECALL_STATE: | |
e30ee581 | 2815 | ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); |
2f2dc053 SW |
2816 | break; |
2817 | ||
186e4f7a YZ |
2818 | case CEPH_SESSION_FLUSHMSG: |
2819 | send_flushmsg_ack(mdsc, session, seq); | |
2820 | break; | |
2821 | ||
03f4fcb0 YZ |
2822 | case CEPH_SESSION_FORCE_RO: |
2823 | dout("force_session_readonly %p\n", session); | |
2824 | spin_lock(&session->s_cap_lock); | |
2825 | session->s_readonly = true; | |
2826 | spin_unlock(&session->s_cap_lock); | |
2827 | wake_up_session_caps(session, 0); | |
2828 | break; | |
2829 | ||
fcff415c YZ |
2830 | case CEPH_SESSION_REJECT: |
2831 | WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING); | |
2832 | pr_info("mds%d rejected session\n", session->s_mds); | |
2833 | session->s_state = CEPH_MDS_SESSION_REJECTED; | |
2834 | cleanup_session_requests(mdsc, session); | |
2835 | remove_session_caps(session); | |
2836 | wake = 2; /* for good measure */ | |
2837 | break; | |
2838 | ||
2f2dc053 SW |
2839 | default: |
2840 | pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); | |
2841 | WARN_ON(1); | |
2842 | } | |
2843 | ||
2844 | mutex_unlock(&session->s_mutex); | |
2845 | if (wake) { | |
2846 | mutex_lock(&mdsc->mutex); | |
2847 | __wake_requests(mdsc, &session->s_waiting); | |
656e4382 YZ |
2848 | if (wake == 2) |
2849 | kick_requests(mdsc, mds); | |
2f2dc053 SW |
2850 | mutex_unlock(&mdsc->mutex); |
2851 | } | |
0a07fc8c YZ |
2852 | if (op == CEPH_SESSION_CLOSE) |
2853 | ceph_put_mds_session(session); | |
2f2dc053 SW |
2854 | return; |
2855 | ||
2856 | bad: | |
2857 | pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, | |
2858 | (int)msg->front.iov_len); | |
9ec7cab1 | 2859 | ceph_msg_dump(msg); |
2f2dc053 SW |
2860 | return; |
2861 | } | |
2862 | ||
2863 | ||
2864 | /* | |
2865 | * called under session->mutex. | |
2866 | */ | |
2867 | static void replay_unsafe_requests(struct ceph_mds_client *mdsc, | |
2868 | struct ceph_mds_session *session) | |
2869 | { | |
2870 | struct ceph_mds_request *req, *nreq; | |
3de22be6 | 2871 | struct rb_node *p; |
2f2dc053 SW |
2872 | int err; |
2873 | ||
2874 | dout("replay_unsafe_requests mds%d\n", session->s_mds); | |
2875 | ||
2876 | mutex_lock(&mdsc->mutex); | |
2877 | list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { | |
6e6f0923 | 2878 | err = __prepare_send_request(mdsc, req, session->s_mds, true); |
2f2dc053 SW |
2879 | if (!err) { |
2880 | ceph_msg_get(req->r_request); | |
2881 | ceph_con_send(&session->s_con, req->r_request); | |
2882 | } | |
2883 | } | |
3de22be6 YZ |
2884 | |
2885 | /* | |
2886 | * Also re-send old requests when the MDS enters the reconnect stage, | |
2887 | * so that it can process completed requests in the clientreplay stage. | |
2888 | */ | |
2889 | p = rb_first(&mdsc->request_tree); | |
2890 | while (p) { | |
2891 | req = rb_entry(p, struct ceph_mds_request, r_node); | |
2892 | p = rb_next(p); | |
bc2de10d | 2893 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
3de22be6 YZ |
2894 | continue; |
2895 | if (req->r_attempts == 0) | |
2896 | continue; /* only old requests */ | |
2897 | if (req->r_session && | |
2898 | req->r_session->s_mds == session->s_mds) { | |
6e6f0923 YZ |
2899 | err = __prepare_send_request(mdsc, req, |
2900 | session->s_mds, true); | |
3de22be6 YZ |
2901 | if (!err) { |
2902 | ceph_msg_get(req->r_request); | |
2903 | ceph_con_send(&session->s_con, req->r_request); | |
2904 | } | |
2905 | } | |
2906 | } | |
2f2dc053 SW |
2907 | mutex_unlock(&mdsc->mutex); |
2908 | } | |
2909 | ||
2910 | /* | |
2911 | * Encode information about a cap for a reconnect with the MDS. | |
2912 | */ | |
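/*
 * Two wire formats are used depending on recon_state->msg_version:
 * ceph_mds_cap_reconnect_v1 for legacy peers, or the v2+ form that
 * appends flock state and, for v3 (struct_v 2), a snap_follows field
 * behind an explicit version/compat/length header.
 */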
2f2dc053 SW |
2913 | static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, |
2914 | void *arg) | |
2915 | { | |
20cb34ae SW |
2916 | union { |
2917 | struct ceph_mds_cap_reconnect v2; | |
2918 | struct ceph_mds_cap_reconnect_v1 v1; | |
2919 | } rec; | |
b3f8d68f | 2920 | struct ceph_inode_info *ci = cap->ci; |
20cb34ae SW |
2921 | struct ceph_reconnect_state *recon_state = arg; |
2922 | struct ceph_pagelist *pagelist = recon_state->pagelist; | |
2f2dc053 SW |
2923 | char *path; |
2924 | int pathlen, err; | |
2925 | u64 pathbase; | |
3469ed0d | 2926 | u64 snap_follows; |
2f2dc053 SW |
2927 | struct dentry *dentry; |
2928 | ||
2f2dc053 SW |
2929 | dout(" adding %p ino %llx.%llx cap %p %lld %s\n", |
2930 | inode, ceph_vinop(inode), cap, cap->cap_id, | |
2931 | ceph_cap_string(cap->issued)); | |
93cea5be SW |
2932 | err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); |
2933 | if (err) | |
2934 | return err; | |
2f2dc053 SW |
2935 | |
2936 | dentry = d_find_alias(inode); | |
2937 | if (dentry) { | |
2938 | path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); | |
2939 | if (IS_ERR(path)) { | |
2940 | err = PTR_ERR(path); | |
e072f8aa | 2941 | goto out_dput; |
2f2dc053 SW |
2942 | } |
2943 | } else { | |
2944 | path = NULL; | |
2945 | pathlen = 0; | |
4eacd4cb | 2946 | pathbase = 0; |
2f2dc053 | 2947 | } |
2f2dc053 | 2948 | |
be655596 | 2949 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
2950 | cap->seq = 0; /* reset cap seq */ |
2951 | cap->issue_seq = 0; /* and issue_seq */ | |
667ca05c | 2952 | cap->mseq = 0; /* and migrate_seq */ |
99a9c273 | 2953 | cap->cap_gen = cap->session->s_cap_gen; |
20cb34ae | 2954 | |
121f22a1 | 2955 | if (recon_state->msg_version >= 2) { |
20cb34ae SW |
2956 | rec.v2.cap_id = cpu_to_le64(cap->cap_id); |
2957 | rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2958 | rec.v2.issued = cpu_to_le32(cap->issued); | |
2959 | rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2960 | rec.v2.pathbase = cpu_to_le64(pathbase); | |
ec1dff25 JL |
2961 | rec.v2.flock_len = (__force __le32) |
2962 | ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1); | |
20cb34ae SW |
2963 | } else { |
2964 | rec.v1.cap_id = cpu_to_le64(cap->cap_id); | |
2965 | rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2966 | rec.v1.issued = cpu_to_le32(cap->issued); | |
2967 | rec.v1.size = cpu_to_le64(inode->i_size); | |
9bbeab41 AB |
2968 | ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime); |
2969 | ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime); | |
20cb34ae SW |
2970 | rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); |
2971 | rec.v1.pathbase = cpu_to_le64(pathbase); | |
20cb34ae | 2972 | } |
3469ed0d YZ |
2973 | |
2974 | if (list_empty(&ci->i_cap_snaps)) { | |
92776fd2 | 2975 | snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0; |
3469ed0d YZ |
2976 | } else { |
2977 | struct ceph_cap_snap *capsnap = | |
2978 | list_first_entry(&ci->i_cap_snaps, | |
2979 | struct ceph_cap_snap, ci_item); | |
2980 | snap_follows = capsnap->follows; | |
20cb34ae | 2981 | } |
be655596 | 2982 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 | 2983 | |
121f22a1 | 2984 | if (recon_state->msg_version >= 2) { |
40819f6f | 2985 | int num_fcntl_locks, num_flock_locks; |
4deb14a2 | 2986 | struct ceph_filelock *flocks = NULL; |
121f22a1 YZ |
2987 | size_t struct_len, total_len = 0; |
2988 | u8 struct_v = 0; | |
39be95e9 JS |
2989 | |
2990 | encode_again: | |
b3f8d68f YZ |
2991 | if (rec.v2.flock_len) { |
2992 | ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); | |
2993 | } else { | |
2994 | num_fcntl_locks = 0; | |
2995 | num_flock_locks = 0; | |
2996 | } | |
4deb14a2 | 2997 | if (num_fcntl_locks + num_flock_locks > 0) { |
6da2ec56 KC |
2998 | flocks = kmalloc_array(num_fcntl_locks + num_flock_locks, |
2999 | sizeof(struct ceph_filelock), | |
3000 | GFP_NOFS); | |
4deb14a2 YZ |
3001 | if (!flocks) { |
3002 | err = -ENOMEM; | |
3003 | goto out_free; | |
3004 | } | |
3005 | err = ceph_encode_locks_to_buffer(inode, flocks, | |
3006 | num_fcntl_locks, | |
3007 | num_flock_locks); | |
3008 | if (err) { | |
3009 | kfree(flocks); | |
3010 | flocks = NULL; | |
3011 | if (err == -ENOSPC) | |
3012 | goto encode_again; | |
3013 | goto out_free; | |
3014 | } | |
3015 | } else { | |
39be95e9 | 3016 | kfree(flocks); |
4deb14a2 | 3017 | flocks = NULL; |
39be95e9 | 3018 | } |
121f22a1 YZ |
3019 | |
3020 | if (recon_state->msg_version >= 3) { | |
3021 | /* version, compat_version and struct_len */ | |
3022 | total_len = 2 * sizeof(u8) + sizeof(u32); | |
3469ed0d | 3023 | struct_v = 2; |
121f22a1 | 3024 | } |
39be95e9 JS |
3025 | /* |
3026 | * the number of encoded locks is now stable, so copy them to the pagelist | 
3027 | */ | |
121f22a1 YZ |
3028 | struct_len = 2 * sizeof(u32) + |
3029 | (num_fcntl_locks + num_flock_locks) * | |
3030 | sizeof(struct ceph_filelock); | |
3031 | rec.v2.flock_len = cpu_to_le32(struct_len); | |
3032 | ||
3033 | struct_len += sizeof(rec.v2); | |
3034 | struct_len += sizeof(u32) + pathlen; | |
3035 | ||
3469ed0d YZ |
3036 | if (struct_v >= 2) |
3037 | struct_len += sizeof(u64); /* snap_follows */ | |
3038 | ||
121f22a1 YZ |
3039 | total_len += struct_len; |
3040 | err = ceph_pagelist_reserve(pagelist, total_len); | |
3041 | ||
3042 | if (!err) { | |
3043 | if (recon_state->msg_version >= 3) { | |
3044 | ceph_pagelist_encode_8(pagelist, struct_v); | |
3045 | ceph_pagelist_encode_8(pagelist, 1); | |
3046 | ceph_pagelist_encode_32(pagelist, struct_len); | |
3047 | } | |
3048 | ceph_pagelist_encode_string(pagelist, path, pathlen); | |
3049 | ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2)); | |
3050 | ceph_locks_to_pagelist(flocks, pagelist, | |
3051 | num_fcntl_locks, | |
3052 | num_flock_locks); | |
3469ed0d YZ |
3053 | if (struct_v >= 2) |
3054 | ceph_pagelist_encode_64(pagelist, snap_follows); | |
121f22a1 | 3055 | } |
39be95e9 | 3056 | kfree(flocks); |
3612abbd | 3057 | } else { |
121f22a1 YZ |
3058 | size_t size = sizeof(u32) + pathlen + sizeof(rec.v1); |
3059 | err = ceph_pagelist_reserve(pagelist, size); | |
3060 | if (!err) { | |
3061 | ceph_pagelist_encode_string(pagelist, path, pathlen); | |
3062 | ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1)); | |
3063 | } | |
40819f6f | 3064 | } |
44c99757 YZ |
3065 | |
3066 | recon_state->nr_caps++; | |
e072f8aa | 3067 | out_free: |
2f2dc053 | 3068 | kfree(path); |
e072f8aa | 3069 | out_dput: |
2f2dc053 | 3070 | dput(dentry); |
93cea5be | 3071 | return err; |
2f2dc053 SW |
3072 | } |
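/*
 * Editor's sketch (not part of this file): the byte layout that
 * encode_caps_cb() produces for one cap record in a v3 reconnect
 * message, restated as standalone userspace C.  The append helper and
 * all ex_* names are illustrative assumptions; only the field order
 * mirrors the function above: [struct_v][compat_v][struct_len], then
 * the length-prefixed path, rec.v2, the lock blob, and snap_follows.
 */
#include <stdint.h>
#include <string.h>

static size_t ex_append(uint8_t *buf, size_t off, const void *p, size_t len)
{
	memcpy(buf + off, p, len);	/* toy stand-in for ceph_pagelist_append() */
	return off + len;
}

static size_t ex_encode_cap_v3(uint8_t *buf, const char *path,
			       const void *rec_v2, uint32_t rec_len,
			       const void *locks, uint32_t locks_len,
			       uint64_t snap_follows)
{
	uint32_t pathlen = (uint32_t)strlen(path);
	/* everything after the 6-byte (version, compat, length) header */
	uint32_t struct_len = 4 + pathlen + rec_len + locks_len + 8;
	uint8_t struct_v = 2, compat_v = 1;	/* struct_v 2 carries snap_follows */
	size_t off = 0;

	off = ex_append(buf, off, &struct_v, 1);
	off = ex_append(buf, off, &compat_v, 1);
	off = ex_append(buf, off, &struct_len, 4);	/* little-endian assumed */
	off = ex_append(buf, off, &pathlen, 4);		/* length-prefixed path */
	off = ex_append(buf, off, path, pathlen);
	off = ex_append(buf, off, rec_v2, rec_len);
	off = ex_append(buf, off, locks, locks_len);
	off = ex_append(buf, off, &snap_follows, 8);
	return off;
}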
3073 | ||
3074 | ||
3075 | /* | |
3076 | * If an MDS fails and recovers, clients need to reconnect in order to | |
3077 | * reestablish shared state. This includes all caps issued through | |
3078 | * this session _and_ the snap_realm hierarchy. Because it's not | |
3079 | * clear which snap realms the mds cares about, we send everything we | |
3080 | * know about; that ensures we'll then get any new info the | 
3081 | * recovering MDS might have. | |
3082 | * | |
3083 | * This is a relatively heavyweight operation, but it's rare. | |
3084 | * | |
3085 | * called with mdsc->mutex held. | |
3086 | */ | |
34b6c855 SW |
3087 | static void send_mds_reconnect(struct ceph_mds_client *mdsc, |
3088 | struct ceph_mds_session *session) | |
2f2dc053 | 3089 | { |
2f2dc053 | 3090 | struct ceph_msg *reply; |
a105f00c | 3091 | struct rb_node *p; |
34b6c855 | 3092 | int mds = session->s_mds; |
9abf82b8 | 3093 | int err = -ENOMEM; |
44c99757 | 3094 | int s_nr_caps; |
93cea5be | 3095 | struct ceph_pagelist *pagelist; |
20cb34ae | 3096 | struct ceph_reconnect_state recon_state; |
c8a96a31 | 3097 | LIST_HEAD(dispose); |
2f2dc053 | 3098 | |
34b6c855 | 3099 | pr_info("mds%d reconnect start\n", mds); |
2f2dc053 | 3100 | |
93cea5be SW |
3101 | pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); |
3102 | if (!pagelist) | |
3103 | goto fail_nopagelist; | |
3104 | ceph_pagelist_init(pagelist); | |
3105 | ||
b61c2763 | 3106 | reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false); |
a79832f2 | 3107 | if (!reply) |
93cea5be | 3108 | goto fail_nomsg; |
93cea5be | 3109 | |
34b6c855 SW |
3110 | mutex_lock(&session->s_mutex); |
3111 | session->s_state = CEPH_MDS_SESSION_RECONNECTING; | |
3112 | session->s_seq = 0; | |
2f2dc053 | 3113 | |
2f2dc053 | 3114 | dout("session %p state %s\n", session, |
a687ecaf | 3115 | ceph_session_state_name(session->s_state)); |
2f2dc053 | 3116 | |
99a9c273 YZ |
3117 | spin_lock(&session->s_gen_ttl_lock); |
3118 | session->s_cap_gen++; | |
3119 | spin_unlock(&session->s_gen_ttl_lock); | |
3120 | ||
3121 | spin_lock(&session->s_cap_lock); | |
03f4fcb0 YZ |
3122 | /* don't know if session is readonly */ |
3123 | session->s_readonly = 0; | |
99a9c273 YZ |
3124 | /* |
3125 | * notify __ceph_remove_cap() that we are composing cap reconnect. | |
3126 | * If a cap gets released before being added to the cap reconnect, | 
3127 | * __ceph_remove_cap() should skip queuing cap release. | |
3128 | */ | |
3129 | session->s_cap_reconnect = 1; | |
e01a5946 | 3130 | /* drop old cap expires; we're about to reestablish that state */ |
c8a96a31 JL |
3131 | detach_cap_releases(session, &dispose); |
3132 | spin_unlock(&session->s_cap_lock); | |
3133 | dispose_cap_releases(mdsc, &dispose); | |
e01a5946 | 3134 | |
5d23371f | 3135 | /* trim unused caps to reduce MDS's cache rejoin time */ |
c0bd50e2 YZ |
3136 | if (mdsc->fsc->sb->s_root) |
3137 | shrink_dcache_parent(mdsc->fsc->sb->s_root); | |
5d23371f YZ |
3138 | |
3139 | ceph_con_close(&session->s_con); | |
3140 | ceph_con_open(&session->s_con, | |
3141 | CEPH_ENTITY_TYPE_MDS, mds, | |
3142 | ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); | |
3143 | ||
3144 | /* replay unsafe requests */ | |
3145 | replay_unsafe_requests(mdsc, session); | |
3146 | ||
3147 | down_read(&mdsc->snap_rwsem); | |
3148 | ||
2f2dc053 | 3149 | /* traverse this session's caps */ |
44c99757 YZ |
3150 | s_nr_caps = session->s_nr_caps; |
3151 | err = ceph_pagelist_encode_32(pagelist, s_nr_caps); | |
93cea5be SW |
3152 | if (err) |
3153 | goto fail; | |
20cb34ae | 3154 | |
44c99757 | 3155 | recon_state.nr_caps = 0; |
20cb34ae | 3156 | recon_state.pagelist = pagelist; |
121f22a1 YZ |
3157 | if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) |
3158 | recon_state.msg_version = 3; | |
3159 | else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK) | |
3160 | recon_state.msg_version = 2; | |
3161 | else | |
3162 | recon_state.msg_version = 1; | |
20cb34ae | 3163 | err = iterate_session_caps(session, encode_caps_cb, &recon_state); |
2f2dc053 | 3164 | if (err < 0) |
9abf82b8 | 3165 | goto fail; |
2f2dc053 | 3166 | |
99a9c273 YZ |
3167 | spin_lock(&session->s_cap_lock); |
3168 | session->s_cap_reconnect = 0; | |
3169 | spin_unlock(&session->s_cap_lock); | |
3170 | ||
2f2dc053 SW |
3171 | /* |
3172 | * snaprealms. we provide the mds with the ino, seq (version), and | 
3173 | * parent for all of our realms. If the mds has any newer info, | |
3174 | * it will tell us. | |
3175 | */ | |
a105f00c SW |
3176 | for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { |
3177 | struct ceph_snap_realm *realm = | |
3178 | rb_entry(p, struct ceph_snap_realm, node); | |
93cea5be | 3179 | struct ceph_mds_snaprealm_reconnect sr_rec; |
2f2dc053 SW |
3180 | |
3181 | dout(" adding snap realm %llx seq %lld parent %llx\n", | |
3182 | realm->ino, realm->seq, realm->parent_ino); | |
93cea5be SW |
3183 | sr_rec.ino = cpu_to_le64(realm->ino); |
3184 | sr_rec.seq = cpu_to_le64(realm->seq); | |
3185 | sr_rec.parent = cpu_to_le64(realm->parent_ino); | |
3186 | err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); | |
3187 | if (err) | |
3188 | goto fail; | |
2f2dc053 | 3189 | } |
2f2dc053 | 3190 | |
121f22a1 | 3191 | reply->hdr.version = cpu_to_le16(recon_state.msg_version); |
44c99757 YZ |
3192 | |
3193 | /* raced with cap release? */ | |
3194 | if (s_nr_caps != recon_state.nr_caps) { | |
3195 | struct page *page = list_first_entry(&pagelist->head, | |
3196 | struct page, lru); | |
3197 | __le32 *addr = kmap_atomic(page); | |
3198 | *addr = cpu_to_le32(recon_state.nr_caps); | |
3199 | kunmap_atomic(addr); | |
ebf18f47 | 3200 | } |
44c99757 YZ |
3201 | |
3202 | reply->hdr.data_len = cpu_to_le32(pagelist->length); | |
3203 | ceph_msg_data_add_pagelist(reply, pagelist); | |
e548e9b9 YZ |
3204 | |
3205 | ceph_early_kick_flushing_caps(mdsc, session); | |
3206 | ||
2f2dc053 SW |
3207 | ceph_con_send(&session->s_con, reply); |
3208 | ||
9abf82b8 SW |
3209 | mutex_unlock(&session->s_mutex); |
3210 | ||
3211 | mutex_lock(&mdsc->mutex); | |
3212 | __wake_requests(mdsc, &session->s_waiting); | |
3213 | mutex_unlock(&mdsc->mutex); | |
3214 | ||
2f2dc053 | 3215 | up_read(&mdsc->snap_rwsem); |
2f2dc053 SW |
3216 | return; |
3217 | ||
93cea5be | 3218 | fail: |
2f2dc053 | 3219 | ceph_msg_put(reply); |
9abf82b8 SW |
3220 | up_read(&mdsc->snap_rwsem); |
3221 | mutex_unlock(&session->s_mutex); | |
93cea5be SW |
3222 | fail_nomsg: |
3223 | ceph_pagelist_release(pagelist); | |
93cea5be | 3224 | fail_nopagelist: |
9abf82b8 | 3225 | pr_err("error %d preparing reconnect for mds%d\n", err, mds); |
9abf82b8 | 3226 | return; |
2f2dc053 SW |
3227 | } |
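/*
 * Editor's sketch (not part of this file): the reconnect-version
 * negotiation above as a standalone function.  The feature-bit values
 * are made-up placeholders; only the precedence (MDSENC gives v3,
 * FLOCK gives v2, otherwise v1) mirrors send_mds_reconnect().
 */
#include <stdint.h>

#define EX_FEATURE_FLOCK	(1ULL << 0)	/* assumed bit positions */
#define EX_FEATURE_MDSENC	(1ULL << 1)

static int ex_reconnect_msg_version(uint64_t peer_features)
{
	if (peer_features & EX_FEATURE_MDSENC)
		return 3;	/* versioned per-cap records (struct_v header) */
	if (peer_features & EX_FEATURE_FLOCK)
		return 2;	/* v2 cap record plus file-lock state */
	return 1;		/* legacy record, no lock state */
}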
3228 | ||
3229 | ||
3230 | /* | |
3231 | * compare old and new mdsmaps, kicking requests | |
3232 | * and closing out old connections as necessary | |
3233 | * | |
3234 | * called under mdsc->mutex. | |
3235 | */ | |
3236 | static void check_new_map(struct ceph_mds_client *mdsc, | |
3237 | struct ceph_mdsmap *newmap, | |
3238 | struct ceph_mdsmap *oldmap) | |
3239 | { | |
3240 | int i; | |
3241 | int oldstate, newstate; | |
3242 | struct ceph_mds_session *s; | |
3243 | ||
3244 | dout("check_new_map new %u old %u\n", | |
3245 | newmap->m_epoch, oldmap->m_epoch); | |
3246 | ||
76201b63 | 3247 | for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) { |
d37b1d99 | 3248 | if (!mdsc->sessions[i]) |
2f2dc053 SW |
3249 | continue; |
3250 | s = mdsc->sessions[i]; | |
3251 | oldstate = ceph_mdsmap_get_state(oldmap, i); | |
3252 | newstate = ceph_mdsmap_get_state(newmap, i); | |
3253 | ||
0deb01c9 | 3254 | dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n", |
2f2dc053 | 3255 | i, ceph_mds_state_name(oldstate), |
0deb01c9 | 3256 | ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "", |
2f2dc053 | 3257 | ceph_mds_state_name(newstate), |
0deb01c9 | 3258 | ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", |
a687ecaf | 3259 | ceph_session_state_name(s->s_state)); |
2f2dc053 | 3260 | |
76201b63 | 3261 | if (i >= newmap->m_num_mds || |
3e8f43a0 | 3262 | memcmp(ceph_mdsmap_get_addr(oldmap, i), |
2f2dc053 SW |
3263 | ceph_mdsmap_get_addr(newmap, i), |
3264 | sizeof(struct ceph_entity_addr))) { | |
3265 | if (s->s_state == CEPH_MDS_SESSION_OPENING) { | |
3266 | /* the session never opened, just close it | |
3267 | * out now */ | |
0a07fc8c | 3268 | get_session(s); |
2600d2dd | 3269 | __unregister_session(mdsc, s); |
2f2dc053 | 3270 | __wake_requests(mdsc, &s->s_waiting); |
0a07fc8c | 3271 | ceph_put_mds_session(s); |
2827528d YZ |
3272 | } else if (i >= newmap->m_num_mds) { |
3273 | /* force close session for stopped mds */ | |
3274 | get_session(s); | |
2600d2dd | 3275 | __unregister_session(mdsc, s); |
2827528d YZ |
3276 | __wake_requests(mdsc, &s->s_waiting); |
3277 | kick_requests(mdsc, i); | |
3278 | mutex_unlock(&mdsc->mutex); | |
3279 | ||
3280 | mutex_lock(&s->s_mutex); | |
3281 | cleanup_session_requests(mdsc, s); | |
3282 | remove_session_caps(s); | |
3283 | mutex_unlock(&s->s_mutex); | |
3284 | ||
3285 | ceph_put_mds_session(s); | |
3286 | ||
3287 | mutex_lock(&mdsc->mutex); | |
2f2dc053 SW |
3288 | } else { |
3289 | /* just close it */ | |
3290 | mutex_unlock(&mdsc->mutex); | |
3291 | mutex_lock(&s->s_mutex); | |
3292 | mutex_lock(&mdsc->mutex); | |
3293 | ceph_con_close(&s->s_con); | |
3294 | mutex_unlock(&s->s_mutex); | |
3295 | s->s_state = CEPH_MDS_SESSION_RESTARTING; | |
3296 | } | |
2f2dc053 SW |
3297 | } else if (oldstate == newstate) { |
3298 | continue; /* nothing new with this mds */ | |
3299 | } | |
3300 | ||
3301 | /* | |
3302 | * send reconnect? | |
3303 | */ | |
3304 | if (s->s_state == CEPH_MDS_SESSION_RESTARTING && | |
34b6c855 SW |
3305 | newstate >= CEPH_MDS_STATE_RECONNECT) { |
3306 | mutex_unlock(&mdsc->mutex); | |
3307 | send_mds_reconnect(mdsc, s); | |
3308 | mutex_lock(&mdsc->mutex); | |
3309 | } | |
2f2dc053 SW |
3310 | |
3311 | /* | |
29790f26 | 3312 | * kick requests on any mds that has gone active. | 
2f2dc053 SW |
3313 | */ |
3314 | if (oldstate < CEPH_MDS_STATE_ACTIVE && | |
3315 | newstate >= CEPH_MDS_STATE_ACTIVE) { | |
29790f26 SW |
3316 | if (oldstate != CEPH_MDS_STATE_CREATING && |
3317 | oldstate != CEPH_MDS_STATE_STARTING) | |
3318 | pr_info("mds%d recovery completed\n", s->s_mds); | |
3319 | kick_requests(mdsc, i); | |
2f2dc053 | 3320 | ceph_kick_flushing_caps(mdsc, s); |
0dc2570f | 3321 | wake_up_session_caps(s, 1); |
2f2dc053 SW |
3322 | } |
3323 | } | |
cb170a22 | 3324 | |
76201b63 | 3325 | for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) { |
cb170a22 SW |
3326 | s = mdsc->sessions[i]; |
3327 | if (!s) | |
3328 | continue; | |
3329 | if (!ceph_mdsmap_is_laggy(newmap, i)) | |
3330 | continue; | |
3331 | if (s->s_state == CEPH_MDS_SESSION_OPEN || | |
3332 | s->s_state == CEPH_MDS_SESSION_HUNG || | |
3333 | s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
3334 | dout(" connecting to export targets of laggy mds%d\n", | |
3335 | i); | |
3336 | __open_export_target_sessions(mdsc, s); | |
3337 | } | |
3338 | } | |
2f2dc053 SW |
3339 | } |
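/*
 * Editor's sketch (not part of this file): the per-mds decision that
 * check_new_map() applies, distilled into one function.  The enum
 * values are illustrative; the comparisons mirror the code above,
 * where "restarting" means session state CEPH_MDS_SESSION_RESTARTING.
 */
enum ex_mds_state { EX_STATE_RECONNECT = 1, EX_STATE_ACTIVE = 2 };
enum ex_action { EX_NONE, EX_CLOSE, EX_RECONNECT, EX_KICK };

static enum ex_action ex_map_change(int addr_changed, int restarting,
				    int oldstate, int newstate)
{
	if (addr_changed)
		return EX_CLOSE;	/* mds moved or stopped: drop session */
	if (restarting && newstate >= EX_STATE_RECONNECT)
		return EX_RECONNECT;	/* recovering mds accepts reconnects */
	if (oldstate < EX_STATE_ACTIVE && newstate >= EX_STATE_ACTIVE)
		return EX_KICK;		/* went active: kick requests and caps */
	return EX_NONE;
}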
3340 | ||
3341 | ||
3342 | ||
3343 | /* | |
3344 | * leases | |
3345 | */ | |
3346 | ||
3347 | /* | |
3348 | * caller must hold session s_mutex, dentry->d_lock | |
3349 | */ | |
3350 | void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) | |
3351 | { | |
3352 | struct ceph_dentry_info *di = ceph_dentry(dentry); | |
3353 | ||
3354 | ceph_put_mds_session(di->lease_session); | |
3355 | di->lease_session = NULL; | |
3356 | } | |
3357 | ||
2600d2dd SW |
3358 | static void handle_lease(struct ceph_mds_client *mdsc, |
3359 | struct ceph_mds_session *session, | |
3360 | struct ceph_msg *msg) | |
2f2dc053 | 3361 | { |
3d14c5d2 | 3362 | struct super_block *sb = mdsc->fsc->sb; |
2f2dc053 | 3363 | struct inode *inode; |
2f2dc053 SW |
3364 | struct dentry *parent, *dentry; |
3365 | struct ceph_dentry_info *di; | |
2600d2dd | 3366 | int mds = session->s_mds; |
2f2dc053 | 3367 | struct ceph_mds_lease *h = msg->front.iov_base; |
1e5ea23d | 3368 | u32 seq; |
2f2dc053 | 3369 | struct ceph_vino vino; |
2f2dc053 SW |
3370 | struct qstr dname; |
3371 | int release = 0; | |
3372 | ||
2f2dc053 SW |
3373 | dout("handle_lease from mds%d\n", mds); |
3374 | ||
3375 | /* decode */ | |
3376 | if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) | |
3377 | goto bad; | |
3378 | vino.ino = le64_to_cpu(h->ino); | |
3379 | vino.snap = CEPH_NOSNAP; | |
1e5ea23d | 3380 | seq = le32_to_cpu(h->seq); |
2f2dc053 SW |
3381 | dname.name = (void *)h + sizeof(*h) + sizeof(u32); |
3382 | dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); | |
3383 | if (dname.len != get_unaligned_le32(h+1)) | |
3384 | goto bad; | |
3385 | ||
2f2dc053 SW |
3386 | /* lookup inode */ |
3387 | inode = ceph_find_inode(sb, vino); | |
2f90b852 SW |
3388 | dout("handle_lease %s, ino %llx %p %.*s\n", |
3389 | ceph_lease_op_name(h->action), vino.ino, inode, | |
1e5ea23d | 3390 | dname.len, dname.name); |
6cd3bcad YZ |
3391 | |
3392 | mutex_lock(&session->s_mutex); | |
3393 | session->s_seq++; | |
3394 | ||
d37b1d99 | 3395 | if (!inode) { |
2f2dc053 SW |
3396 | dout("handle_lease no inode %llx\n", vino.ino); |
3397 | goto release; | |
3398 | } | |
2f2dc053 SW |
3399 | |
3400 | /* dentry */ | |
3401 | parent = d_find_alias(inode); | |
3402 | if (!parent) { | |
3403 | dout("no parent dentry on inode %p\n", inode); | |
3404 | WARN_ON(1); | |
3405 | goto release; /* hrm... */ | |
3406 | } | |
8387ff25 | 3407 | dname.hash = full_name_hash(parent, dname.name, dname.len); |
2f2dc053 SW |
3408 | dentry = d_lookup(parent, &dname); |
3409 | dput(parent); | |
3410 | if (!dentry) | |
3411 | goto release; | |
3412 | ||
3413 | spin_lock(&dentry->d_lock); | |
3414 | di = ceph_dentry(dentry); | |
3415 | switch (h->action) { | |
3416 | case CEPH_MDS_LEASE_REVOKE: | |
3d8eb7a9 | 3417 | if (di->lease_session == session) { |
1e5ea23d SW |
3418 | if (ceph_seq_cmp(di->lease_seq, seq) > 0) |
3419 | h->seq = cpu_to_le32(di->lease_seq); | |
2f2dc053 SW |
3420 | __ceph_mdsc_drop_dentry_lease(dentry); |
3421 | } | |
3422 | release = 1; | |
3423 | break; | |
3424 | ||
3425 | case CEPH_MDS_LEASE_RENEW: | |
3d8eb7a9 | 3426 | if (di->lease_session == session && |
2f2dc053 SW |
3427 | di->lease_gen == session->s_cap_gen && |
3428 | di->lease_renew_from && | |
3429 | di->lease_renew_after == 0) { | |
3430 | unsigned long duration = | |
3563dbdd | 3431 | msecs_to_jiffies(le32_to_cpu(h->duration_ms)); |
2f2dc053 | 3432 | |
1e5ea23d | 3433 | di->lease_seq = seq; |
9b16f03c | 3434 | di->time = di->lease_renew_from + duration; |
2f2dc053 SW |
3435 | di->lease_renew_after = di->lease_renew_from + |
3436 | (duration >> 1); | |
3437 | di->lease_renew_from = 0; | |
3438 | } | |
3439 | break; | |
3440 | } | |
3441 | spin_unlock(&dentry->d_lock); | |
3442 | dput(dentry); | |
3443 | ||
3444 | if (!release) | |
3445 | goto out; | |
3446 | ||
3447 | release: | |
3448 | /* let's just reuse the same message */ | |
3449 | h->action = CEPH_MDS_LEASE_REVOKE_ACK; | |
3450 | ceph_msg_get(msg); | |
3451 | ceph_con_send(&session->s_con, msg); | |
3452 | ||
3453 | out: | |
3454 | iput(inode); | |
3455 | mutex_unlock(&session->s_mutex); | |
2f2dc053 SW |
3456 | return; |
3457 | ||
3458 | bad: | |
3459 | pr_err("corrupt lease message\n"); | |
9ec7cab1 | 3460 | ceph_msg_dump(msg); |
2f2dc053 SW |
3461 | } |
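/*
 * Editor's sketch (not part of this file): the renewal arithmetic in
 * the CEPH_MDS_LEASE_RENEW case above.  The lease is valid for
 * `duration` measured from when the renew was *sent*, and the next
 * renew fires at the halfway point; milliseconds stand in for jiffies.
 */
#include <stdint.h>

struct ex_lease {
	uint64_t expires_at;	/* analogous to di->time */
	uint64_t renew_after;	/* analogous to di->lease_renew_after */
};

static void ex_lease_renewed(struct ex_lease *l, uint64_t renew_sent,
			     uint64_t duration)
{
	l->expires_at = renew_sent + duration;
	l->renew_after = renew_sent + duration / 2;	/* duration >> 1 above */
}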
3462 | ||
3463 | void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, | |
3464 | struct inode *inode, | |
3465 | struct dentry *dentry, char action, | |
3466 | u32 seq) | |
3467 | { | |
3468 | struct ceph_msg *msg; | |
3469 | struct ceph_mds_lease *lease; | |
3470 | int len = sizeof(*lease) + sizeof(u32); | |
3471 | int dnamelen = 0; | |
3472 | ||
3473 | dout("lease_send_msg inode %p dentry %p %s to mds%d\n", | |
3474 | inode, dentry, ceph_lease_op_name(action), session->s_mds); | |
3475 | dnamelen = dentry->d_name.len; | |
3476 | len += dnamelen; | |
3477 | ||
b61c2763 | 3478 | msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); |
a79832f2 | 3479 | if (!msg) |
2f2dc053 SW |
3480 | return; |
3481 | lease = msg->front.iov_base; | |
3482 | lease->action = action; | |
2f2dc053 SW |
3483 | lease->ino = cpu_to_le64(ceph_vino(inode).ino); |
3484 | lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); | |
3485 | lease->seq = cpu_to_le32(seq); | |
3486 | put_unaligned_le32(dnamelen, lease + 1); | |
3487 | memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); | |
3488 | ||
3489 | /* | |
3490 | * if this is a preemptive lease RELEASE, no need to | |
3491 | * flush request stream, since the actual request will | |
3492 | * soon follow. | |
3493 | */ | |
3494 | msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); | |
3495 | ||
3496 | ceph_con_send(&session->s_con, msg); | |
3497 | } | |
3498 | ||
2f2dc053 | 3499 | /* |
7aac453a | 3500 | * lock and unlock all sessions, to wait for ongoing session activity | 
2f2dc053 | 3501 | */ |
7aac453a | 3502 | static void lock_unlock_sessions(struct ceph_mds_client *mdsc) |
2f2dc053 SW |
3503 | { |
3504 | int i; | |
3505 | ||
2f2dc053 SW |
3506 | mutex_lock(&mdsc->mutex); |
3507 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3508 | struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); | |
3509 | if (!s) | |
3510 | continue; | |
3511 | mutex_unlock(&mdsc->mutex); | |
3512 | mutex_lock(&s->s_mutex); | |
3513 | mutex_unlock(&s->s_mutex); | |
3514 | ceph_put_mds_session(s); | |
3515 | mutex_lock(&mdsc->mutex); | |
3516 | } | |
3517 | mutex_unlock(&mdsc->mutex); | |
3518 | } | |
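/*
 * Editor's sketch (not part of this file): the "lock then immediately
 * unlock" idiom above is a barrier, not an access; taking each mutex
 * in turn waits out whichever critical section currently holds it.
 * A toy pthread restatement:
 */
#include <pthread.h>

static void ex_drain_mutex(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);		/* blocks until the current holder is done */
	pthread_mutex_unlock(m);	/* we only needed the wait, not the lock */
}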
3519 | ||
3520 | ||
3521 | ||
3522 | /* | |
3523 | * delayed work -- periodically trim expired leases, renew caps with mds | |
3524 | */ | |
3525 | static void schedule_delayed(struct ceph_mds_client *mdsc) | |
3526 | { | |
3527 | int delay = 5; | |
3528 | unsigned hz = round_jiffies_relative(HZ * delay); | |
3529 | schedule_delayed_work(&mdsc->delayed_work, hz); | |
3530 | } | |
3531 | ||
3532 | static void delayed_work(struct work_struct *work) | |
3533 | { | |
3534 | int i; | |
3535 | struct ceph_mds_client *mdsc = | |
3536 | container_of(work, struct ceph_mds_client, delayed_work.work); | |
3537 | int renew_interval; | |
3538 | int renew_caps; | |
3539 | ||
3540 | dout("mdsc delayed_work\n"); | |
afcdaea3 | 3541 | ceph_check_delayed_caps(mdsc); |
2f2dc053 SW |
3542 | |
3543 | mutex_lock(&mdsc->mutex); | |
3544 | renew_interval = mdsc->mdsmap->m_session_timeout >> 2; | |
3545 | renew_caps = time_after_eq(jiffies, HZ*renew_interval + | |
3546 | mdsc->last_renew_caps); | |
3547 | if (renew_caps) | |
3548 | mdsc->last_renew_caps = jiffies; | |
3549 | ||
3550 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3551 | struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); | |
d37b1d99 | 3552 | if (!s) |
2f2dc053 SW |
3553 | continue; |
3554 | if (s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
3555 | dout("resending session close request for mds%d\n", | |
3556 | s->s_mds); | |
3557 | request_close_session(mdsc, s); | |
3558 | ceph_put_mds_session(s); | |
3559 | continue; | |
3560 | } | |
3561 | if (s->s_ttl && time_after(jiffies, s->s_ttl)) { | |
3562 | if (s->s_state == CEPH_MDS_SESSION_OPEN) { | |
3563 | s->s_state = CEPH_MDS_SESSION_HUNG; | |
3564 | pr_info("mds%d hung\n", s->s_mds); | |
3565 | } | |
3566 | } | |
3567 | if (s->s_state < CEPH_MDS_SESSION_OPEN) { | |
3568 | /* this mds is failed or recovering, just wait */ | |
3569 | ceph_put_mds_session(s); | |
3570 | continue; | |
3571 | } | |
3572 | mutex_unlock(&mdsc->mutex); | |
3573 | ||
3574 | mutex_lock(&s->s_mutex); | |
3575 | if (renew_caps) | |
3576 | send_renew_caps(mdsc, s); | |
3577 | else | |
3578 | ceph_con_keepalive(&s->s_con); | |
aab53dd9 SW |
3579 | if (s->s_state == CEPH_MDS_SESSION_OPEN || |
3580 | s->s_state == CEPH_MDS_SESSION_HUNG) | |
3d7ded4d | 3581 | ceph_send_cap_releases(mdsc, s); |
2f2dc053 SW |
3582 | mutex_unlock(&s->s_mutex); |
3583 | ceph_put_mds_session(s); | |
3584 | ||
3585 | mutex_lock(&mdsc->mutex); | |
3586 | } | |
3587 | mutex_unlock(&mdsc->mutex); | |
3588 | ||
3589 | schedule_delayed(mdsc); | |
3590 | } | |
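/*
 * Editor's sketch (not part of this file): the cap-renewal cadence
 * used by delayed_work() above, in plain numbers.  Renewals go out
 * every quarter of the MDS session timeout, so several can be lost
 * before the session goes stale.  `now` stands in for jiffies, and the
 * wraparound that time_after_eq() handles is ignored here.
 */
static int ex_should_renew_caps(unsigned long now, unsigned long last_renew,
				unsigned long session_timeout)
{
	unsigned long renew_interval = session_timeout >> 2;

	return now >= last_renew + renew_interval;
}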
3591 | ||
3d14c5d2 | 3592 | int ceph_mdsc_init(struct ceph_fs_client *fsc) |
2f2dc053 | 3594 | { |
3d14c5d2 YS |
3595 | struct ceph_mds_client *mdsc; |
3596 | ||
3597 | mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); | |
3598 | if (!mdsc) | |
3599 | return -ENOMEM; | |
3600 | mdsc->fsc = fsc; | |
2f2dc053 SW |
3601 | mutex_init(&mdsc->mutex); |
3602 | mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); | |
d37b1d99 | 3603 | if (!mdsc->mdsmap) { |
fb3101b6 | 3604 | kfree(mdsc); |
2d06eeb8 | 3605 | return -ENOMEM; |
fb3101b6 | 3606 | } |
2d06eeb8 | 3607 | |
50c55aec | 3608 | fsc->mdsc = mdsc; |
2f2dc053 | 3609 | init_completion(&mdsc->safe_umount_waiters); |
f3c60c59 | 3610 | init_waitqueue_head(&mdsc->session_close_wq); |
2f2dc053 SW |
3611 | INIT_LIST_HEAD(&mdsc->waiting_for_map); |
3612 | mdsc->sessions = NULL; | |
86d8f67b | 3613 | atomic_set(&mdsc->num_sessions, 0); |
2f2dc053 SW |
3614 | mdsc->max_sessions = 0; |
3615 | mdsc->stopping = 0; | |
d557c48d | 3616 | atomic64_set(&mdsc->quotarealms_count, 0); |
affbc19a | 3617 | mdsc->last_snap_seq = 0; |
2f2dc053 | 3618 | init_rwsem(&mdsc->snap_rwsem); |
a105f00c | 3619 | mdsc->snap_realms = RB_ROOT; |
2f2dc053 SW |
3620 | INIT_LIST_HEAD(&mdsc->snap_empty); |
3621 | spin_lock_init(&mdsc->snap_empty_lock); | |
3622 | mdsc->last_tid = 0; | |
e8a7b8b1 | 3623 | mdsc->oldest_tid = 0; |
44ca18f2 | 3624 | mdsc->request_tree = RB_ROOT; |
2f2dc053 SW |
3625 | INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); |
3626 | mdsc->last_renew_caps = jiffies; | |
3627 | INIT_LIST_HEAD(&mdsc->cap_delay_list); | |
3628 | spin_lock_init(&mdsc->cap_delay_lock); | |
3629 | INIT_LIST_HEAD(&mdsc->snap_flush_list); | |
3630 | spin_lock_init(&mdsc->snap_flush_lock); | |
553adfd9 | 3631 | mdsc->last_cap_flush_tid = 1; |
e4500b5e | 3632 | INIT_LIST_HEAD(&mdsc->cap_flush_list); |
2f2dc053 | 3633 | INIT_LIST_HEAD(&mdsc->cap_dirty); |
db354052 | 3634 | INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); |
2f2dc053 SW |
3635 | mdsc->num_cap_flushing = 0; |
3636 | spin_lock_init(&mdsc->cap_dirty_lock); | |
3637 | init_waitqueue_head(&mdsc->cap_flushing_wq); | |
3638 | spin_lock_init(&mdsc->dentry_lru_lock); | |
3639 | INIT_LIST_HEAD(&mdsc->dentry_lru); | |
2d06eeb8 | 3640 | |
37151668 | 3641 | ceph_caps_init(mdsc); |
3d14c5d2 | 3642 | ceph_adjust_min_caps(mdsc, fsc->min_caps); |
37151668 | 3643 | |
10183a69 YZ |
3644 | init_rwsem(&mdsc->pool_perm_rwsem); |
3645 | mdsc->pool_perm_tree = RB_ROOT; | |
3646 | ||
dfeb84d4 YZ |
3647 | strscpy(mdsc->nodename, utsname()->nodename, |
3648 | sizeof(mdsc->nodename)); | |
5f44f142 | 3649 | return 0; |
2f2dc053 SW |
3650 | } |
3651 | ||
3652 | /* | |
3653 | * Wait for safe replies on open mds requests. If we time out, drop | |
3654 | * all requests from the tree to avoid dangling dentry refs. | |
3655 | */ | |
3656 | static void wait_requests(struct ceph_mds_client *mdsc) | |
3657 | { | |
a319bf56 | 3658 | struct ceph_options *opts = mdsc->fsc->client->options; |
2f2dc053 | 3659 | struct ceph_mds_request *req; |
2f2dc053 SW |
3660 | |
3661 | mutex_lock(&mdsc->mutex); | |
44ca18f2 | 3662 | if (__get_oldest_req(mdsc)) { |
2f2dc053 | 3663 | mutex_unlock(&mdsc->mutex); |
44ca18f2 | 3664 | |
2f2dc053 SW |
3665 | dout("wait_requests waiting for requests\n"); |
3666 | wait_for_completion_timeout(&mdsc->safe_umount_waiters, | |
a319bf56 | 3667 | ceph_timeout_jiffies(opts->mount_timeout)); |
2f2dc053 SW |
3668 | |
3669 | /* tear down remaining requests */ | |
44ca18f2 SW |
3670 | mutex_lock(&mdsc->mutex); |
3671 | while ((req = __get_oldest_req(mdsc))) { | |
2f2dc053 SW |
3672 | dout("wait_requests timed out on tid %llu\n", |
3673 | req->r_tid); | |
44ca18f2 | 3674 | __unregister_request(mdsc, req); |
2f2dc053 SW |
3675 | } |
3676 | } | |
3677 | mutex_unlock(&mdsc->mutex); | |
3678 | dout("wait_requests done\n"); | |
3679 | } | |
3680 | ||
3681 | /* | |
3682 | * called before mount is ro, and before dentries are torn down. | |
3683 | * (hmm, does this still race with new lookups?) | |
3684 | */ | |
3685 | void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) | |
3686 | { | |
3687 | dout("pre_umount\n"); | |
3688 | mdsc->stopping = 1; | |
3689 | ||
7aac453a | 3690 | lock_unlock_sessions(mdsc); |
afcdaea3 | 3691 | ceph_flush_dirty_caps(mdsc); |
2f2dc053 | 3692 | wait_requests(mdsc); |
17c688c3 SW |
3693 | |
3694 | /* | |
3695 | * wait for reply handlers to drop their request refs and | |
3696 | * their inode/dcache refs | |
3697 | */ | |
3698 | ceph_msgr_flush(); | |
2f2dc053 SW |
3699 | } |
3700 | ||
3701 | /* | |
3702 | * wait for all write mds requests to flush. | |
3703 | */ | |
3704 | static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) | |
3705 | { | |
80fc7314 | 3706 | struct ceph_mds_request *req = NULL, *nextreq; |
44ca18f2 | 3707 | struct rb_node *n; |
2f2dc053 SW |
3708 | |
3709 | mutex_lock(&mdsc->mutex); | |
3710 | dout("wait_unsafe_requests want %lld\n", want_tid); | |
80fc7314 | 3711 | restart: |
44ca18f2 SW |
3712 | req = __get_oldest_req(mdsc); |
3713 | while (req && req->r_tid <= want_tid) { | |
80fc7314 SW |
3714 | /* find next request */ |
3715 | n = rb_next(&req->r_node); | |
3716 | if (n) | |
3717 | nextreq = rb_entry(n, struct ceph_mds_request, r_node); | |
3718 | else | |
3719 | nextreq = NULL; | |
e8a7b8b1 YZ |
3720 | if (req->r_op != CEPH_MDS_OP_SETFILELOCK && |
3721 | (req->r_op & CEPH_MDS_OP_WRITE)) { | |
44ca18f2 SW |
3722 | /* write op */ |
3723 | ceph_mdsc_get_request(req); | |
80fc7314 SW |
3724 | if (nextreq) |
3725 | ceph_mdsc_get_request(nextreq); | |
44ca18f2 SW |
3726 | mutex_unlock(&mdsc->mutex); |
3727 | dout("wait_unsafe_requests wait on %llu (want %llu)\n", | |
3728 | req->r_tid, want_tid); | |
3729 | wait_for_completion(&req->r_safe_completion); | |
3730 | mutex_lock(&mdsc->mutex); | |
44ca18f2 | 3731 | ceph_mdsc_put_request(req); |
80fc7314 SW |
3732 | if (!nextreq) |
3733 | break; /* next didn't exist before we slept, so we're done! */ | 
3734 | if (RB_EMPTY_NODE(&nextreq->r_node)) { | |
3735 | /* next request was removed from tree */ | |
3736 | ceph_mdsc_put_request(nextreq); | |
3737 | goto restart; | |
3738 | } | |
3739 | ceph_mdsc_put_request(nextreq); /* won't go away */ | |
44ca18f2 | 3740 | } |
80fc7314 | 3741 | req = nextreq; |
2f2dc053 SW |
3742 | } |
3743 | mutex_unlock(&mdsc->mutex); | |
3744 | dout("wait_unsafe_requests done\n"); | |
3745 | } | |
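/*
 * Editor's sketch (not part of this file): the traversal shape used by
 * wait_unsafe_requests() above, in miniature.  Pin the *next* node
 * with a reference before sleeping with the lock dropped, then check
 * whether it was unlinked while we slept and restart if so.  The list,
 * refcounts and callback are toy assumptions; only that shape matters.
 */
struct ex_node {
	int refs;
	int unlinked;			/* set when removed from the list */
	struct ex_node *next;
};

static void ex_get(struct ex_node *n) { if (n) n->refs++; }
static void ex_put(struct ex_node *n) { if (n) n->refs--; }

static void ex_wait_all(struct ex_node *head,
			void (*wait_unlocked)(struct ex_node *))
{
	struct ex_node *n, *next;
restart:
	for (n = head; n; n = next) {
		next = n->next;
		ex_get(n);
		ex_get(next);
		wait_unlocked(n);	/* lock dropped inside; list may mutate */
		ex_put(n);
		if (next && next->unlinked) {
			ex_put(next);
			goto restart;	/* our cursor was removed: start over */
		}
		ex_put(next);		/* the list's own ref keeps it alive */
	}
}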
3746 | ||
3747 | void ceph_mdsc_sync(struct ceph_mds_client *mdsc) | |
3748 | { | |
0e294387 | 3749 | u64 want_tid, want_flush; |
2f2dc053 | 3750 | |
52953d55 | 3751 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) |
56b7cf95 SW |
3752 | return; |
3753 | ||
2f2dc053 SW |
3754 | dout("sync\n"); |
3755 | mutex_lock(&mdsc->mutex); | |
3756 | want_tid = mdsc->last_tid; | |
2f2dc053 | 3757 | mutex_unlock(&mdsc->mutex); |
2f2dc053 | 3758 | |
afcdaea3 | 3759 | ceph_flush_dirty_caps(mdsc); |
d3383a8e | 3760 | spin_lock(&mdsc->cap_dirty_lock); |
8310b089 | 3761 | want_flush = mdsc->last_cap_flush_tid; |
c8799fc4 YZ |
3762 | if (!list_empty(&mdsc->cap_flush_list)) { |
3763 | struct ceph_cap_flush *cf = | |
3764 | list_last_entry(&mdsc->cap_flush_list, | |
3765 | struct ceph_cap_flush, g_list); | |
3766 | cf->wake = true; | |
3767 | } | |
d3383a8e YZ |
3768 | spin_unlock(&mdsc->cap_dirty_lock); |
3769 | ||
0e294387 YZ |
3770 | dout("sync want tid %lld flush_seq %lld\n", |
3771 | want_tid, want_flush); | |
2f2dc053 SW |
3772 | |
3773 | wait_unsafe_requests(mdsc, want_tid); | |
0e294387 | 3774 | wait_caps_flush(mdsc, want_flush); |
2f2dc053 SW |
3775 | } |
3776 | ||
f3c60c59 SW |
3777 | /* |
3778 | * true if all sessions are closed, or we force unmount | |
3779 | */ | |
fcff415c | 3780 | static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped) |
f3c60c59 | 3781 | { |
52953d55 | 3782 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) |
f3c60c59 | 3783 | return true; |
fcff415c | 3784 | return atomic_read(&mdsc->num_sessions) <= skipped; |
f3c60c59 | 3785 | } |
2f2dc053 SW |
3786 | |
3787 | /* | |
3788 | * called after sb is ro. | |
3789 | */ | |
3790 | void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) | |
3791 | { | |
a319bf56 | 3792 | struct ceph_options *opts = mdsc->fsc->client->options; |
2f2dc053 SW |
3793 | struct ceph_mds_session *session; |
3794 | int i; | |
fcff415c | 3795 | int skipped = 0; |
2f2dc053 SW |
3796 | |
3797 | dout("close_sessions\n"); | |
3798 | ||
2f2dc053 | 3799 | /* close sessions */ |
f3c60c59 SW |
3800 | mutex_lock(&mdsc->mutex); |
3801 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3802 | session = __ceph_lookup_mds_session(mdsc, i); | |
3803 | if (!session) | |
3804 | continue; | |
2f2dc053 | 3805 | mutex_unlock(&mdsc->mutex); |
f3c60c59 | 3806 | mutex_lock(&session->s_mutex); |
fcff415c YZ |
3807 | if (__close_session(mdsc, session) <= 0) |
3808 | skipped++; | |
f3c60c59 SW |
3809 | mutex_unlock(&session->s_mutex); |
3810 | ceph_put_mds_session(session); | |
2f2dc053 SW |
3811 | mutex_lock(&mdsc->mutex); |
3812 | } | |
f3c60c59 SW |
3813 | mutex_unlock(&mdsc->mutex); |
3814 | ||
3815 | dout("waiting for sessions to close\n"); | |
fcff415c YZ |
3816 | wait_event_timeout(mdsc->session_close_wq, |
3817 | done_closing_sessions(mdsc, skipped), | |
a319bf56 | 3818 | ceph_timeout_jiffies(opts->mount_timeout)); |
2f2dc053 SW |
3819 | |
3820 | /* tear down remaining sessions */ | |
f3c60c59 | 3821 | mutex_lock(&mdsc->mutex); |
2f2dc053 SW |
3822 | for (i = 0; i < mdsc->max_sessions; i++) { |
3823 | if (mdsc->sessions[i]) { | |
3824 | session = get_session(mdsc->sessions[i]); | |
2600d2dd | 3825 | __unregister_session(mdsc, session); |
2f2dc053 SW |
3826 | mutex_unlock(&mdsc->mutex); |
3827 | mutex_lock(&session->s_mutex); | |
3828 | remove_session_caps(session); | |
3829 | mutex_unlock(&session->s_mutex); | |
3830 | ceph_put_mds_session(session); | |
3831 | mutex_lock(&mdsc->mutex); | |
3832 | } | |
3833 | } | |
2f2dc053 | 3834 | WARN_ON(!list_empty(&mdsc->cap_delay_list)); |
2f2dc053 SW |
3835 | mutex_unlock(&mdsc->mutex); |
3836 | ||
3837 | ceph_cleanup_empty_realms(mdsc); | |
3838 | ||
3839 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ | |
3840 | ||
3841 | dout("stopped\n"); | |
3842 | } | |
3843 | ||
48fec5d0 YZ |
3844 | void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) |
3845 | { | |
3846 | struct ceph_mds_session *session; | |
3847 | int mds; | |
3848 | ||
3849 | dout("force umount\n"); | |
3850 | ||
3851 | mutex_lock(&mdsc->mutex); | |
3852 | for (mds = 0; mds < mdsc->max_sessions; mds++) { | |
3853 | session = __ceph_lookup_mds_session(mdsc, mds); | |
3854 | if (!session) | |
3855 | continue; | |
3856 | mutex_unlock(&mdsc->mutex); | |
3857 | mutex_lock(&session->s_mutex); | |
3858 | __close_session(mdsc, session); | |
3859 | if (session->s_state == CEPH_MDS_SESSION_CLOSING) { | |
3860 | cleanup_session_requests(mdsc, session); | |
3861 | remove_session_caps(session); | |
3862 | } | |
3863 | mutex_unlock(&session->s_mutex); | |
3864 | ceph_put_mds_session(session); | |
3865 | mutex_lock(&mdsc->mutex); | |
3866 | kick_requests(mdsc, mds); | |
3867 | } | |
3868 | __wake_requests(mdsc, &mdsc->waiting_for_map); | |
3869 | mutex_unlock(&mdsc->mutex); | |
3870 | } | |
3871 | ||
3d14c5d2 | 3872 | static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) |
2f2dc053 SW |
3873 | { |
3874 | dout("stop\n"); | |
3875 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ | |
3876 | if (mdsc->mdsmap) | |
3877 | ceph_mdsmap_destroy(mdsc->mdsmap); | |
3878 | kfree(mdsc->sessions); | |
37151668 | 3879 | ceph_caps_finalize(mdsc); |
10183a69 | 3880 | ceph_pool_perm_destroy(mdsc); |
2f2dc053 SW |
3881 | } |
3882 | ||
3d14c5d2 YS |
3883 | void ceph_mdsc_destroy(struct ceph_fs_client *fsc) |
3884 | { | |
3885 | struct ceph_mds_client *mdsc = fsc->mdsc; | |
ef550f6f | 3886 | dout("mdsc_destroy %p\n", mdsc); |
ef550f6f | 3887 | |
50c55aec CX |
3888 | if (!mdsc) |
3889 | return; | |
3890 | ||
ef550f6f SW |
3891 | /* flush out any connection work with references to us */ |
3892 | ceph_msgr_flush(); | |
3893 | ||
62a65f36 YZ |
3894 | ceph_mdsc_stop(mdsc); |
3895 | ||
3d14c5d2 YS |
3896 | fsc->mdsc = NULL; |
3897 | kfree(mdsc); | |
ef550f6f | 3898 | dout("mdsc_destroy %p done\n", mdsc); |
3d14c5d2 YS |
3899 | } |
3900 | ||
430afbad YZ |
3901 | void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) |
3902 | { | |
3903 | struct ceph_fs_client *fsc = mdsc->fsc; | |
3904 | const char *mds_namespace = fsc->mount_options->mds_namespace; | |
3905 | void *p = msg->front.iov_base; | |
3906 | void *end = p + msg->front.iov_len; | |
3907 | u32 epoch; | |
3908 | u32 map_len; | |
3909 | u32 num_fs; | |
3910 | u32 mount_fscid = (u32)-1; | |
3911 | u8 struct_v, struct_cv; | |
3912 | int err = -EINVAL; | |
3913 | ||
3914 | ceph_decode_need(&p, end, sizeof(u32), bad); | |
3915 | epoch = ceph_decode_32(&p); | |
3916 | ||
3917 | dout("handle_fsmap epoch %u\n", epoch); | |
3918 | ||
3919 | ceph_decode_need(&p, end, 2 + sizeof(u32), bad); | |
3920 | struct_v = ceph_decode_8(&p); | |
3921 | struct_cv = ceph_decode_8(&p); | |
3922 | map_len = ceph_decode_32(&p); | |
3923 | ||
3924 | ceph_decode_need(&p, end, sizeof(u32) * 3, bad); | |
3925 | p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */ | |
3926 | ||
3927 | num_fs = ceph_decode_32(&p); | |
3928 | while (num_fs-- > 0) { | |
3929 | void *info_p, *info_end; | |
3930 | u32 info_len; | |
3931 | u8 info_v, info_cv; | |
3932 | u32 fscid, namelen; | |
3933 | ||
3934 | ceph_decode_need(&p, end, 2 + sizeof(u32), bad); | |
3935 | info_v = ceph_decode_8(&p); | |
3936 | info_cv = ceph_decode_8(&p); | |
3937 | info_len = ceph_decode_32(&p); | |
3938 | ceph_decode_need(&p, end, info_len, bad); | |
3939 | info_p = p; | |
3940 | info_end = p + info_len; | |
3941 | p = info_end; | |
3942 | ||
3943 | ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad); | |
3944 | fscid = ceph_decode_32(&info_p); | |
3945 | namelen = ceph_decode_32(&info_p); | |
3946 | ceph_decode_need(&info_p, info_end, namelen, bad); | |
3947 | ||
3948 | if (mds_namespace && | |
3949 | strlen(mds_namespace) == namelen && | |
3950 | !strncmp(mds_namespace, (char *)info_p, namelen)) { | |
3951 | mount_fscid = fscid; | |
3952 | break; | |
3953 | } | |
3954 | } | |
3955 | ||
3956 | ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch); | |
3957 | if (mount_fscid != (u32)-1) { | |
3958 | fsc->client->monc.fs_cluster_id = mount_fscid; | |
3959 | ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, | |
3960 | 0, true); | |
3961 | ceph_monc_renew_subs(&fsc->client->monc); | |
3962 | } else { | |
3963 | err = -ENOENT; | |
3964 | goto err_out; | |
3965 | } | |
3966 | return; | |
76bd6ec4 | 3967 | |
430afbad YZ |
3968 | bad: |
3969 | pr_err("error decoding fsmap\n"); | |
3970 | err_out: | |
3971 | mutex_lock(&mdsc->mutex); | |
76bd6ec4 | 3972 | mdsc->mdsmap_err = err; |
430afbad YZ |
3973 | __wake_requests(mdsc, &mdsc->waiting_for_map); |
3974 | mutex_unlock(&mdsc->mutex); | |
430afbad | 3975 | } |
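/*
 * Editor's sketch (not part of this file): the bounds-checked decode
 * pattern used above, as userspace C.  Every read is preceded by a
 * "do we have this many bytes left?" check, so a short buffer fails
 * cleanly instead of overrunning.  The ex_decode_* helpers are toy
 * stand-ins for ceph_decode_need()/ceph_decode_32().
 */
#include <stdint.h>
#include <string.h>

static int ex_decode_need(const uint8_t **p, const uint8_t *end, size_t n)
{
	return (size_t)(end - *p) >= n ? 0 : -1;	/* -1 plays "goto bad" */
}

static int ex_decode_u32(const uint8_t **p, const uint8_t *end, uint32_t *v)
{
	if (ex_decode_need(p, end, sizeof(*v)))
		return -1;
	memcpy(v, *p, sizeof(*v));	/* little-endian wire form assumed */
	*p += sizeof(*v);
	return 0;
}

/* Skip one length-prefixed blob, as the fsmap decoder does per filesystem. */
static int ex_skip_lenprefixed(const uint8_t **p, const uint8_t *end)
{
	uint32_t len;

	if (ex_decode_u32(p, end, &len) || ex_decode_need(p, end, len))
		return -1;
	*p += len;
	return 0;
}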
2f2dc053 SW |
3976 | |
3977 | /* | |
3978 | * handle mds map update. | |
3979 | */ | |
430afbad | 3980 | void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) |
2f2dc053 SW |
3981 | { |
3982 | u32 epoch; | |
3983 | u32 maplen; | |
3984 | void *p = msg->front.iov_base; | |
3985 | void *end = p + msg->front.iov_len; | |
3986 | struct ceph_mdsmap *newmap, *oldmap; | |
3987 | struct ceph_fsid fsid; | |
3988 | int err = -EINVAL; | |
3989 | ||
3990 | ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); | |
3991 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | |
3d14c5d2 | 3992 | if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) |
0743304d | 3993 | return; |
c89136ea SW |
3994 | epoch = ceph_decode_32(&p); |
3995 | maplen = ceph_decode_32(&p); | |
2f2dc053 SW |
3996 | dout("handle_map epoch %u len %d\n", epoch, (int)maplen); |
3997 | ||
3998 | /* do we need it? */ | |
2f2dc053 SW |
3999 | mutex_lock(&mdsc->mutex); |
4000 | if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { | |
4001 | dout("handle_map epoch %u <= our %u\n", | |
4002 | epoch, mdsc->mdsmap->m_epoch); | |
4003 | mutex_unlock(&mdsc->mutex); | |
4004 | return; | |
4005 | } | |
4006 | ||
4007 | newmap = ceph_mdsmap_decode(&p, end); | |
4008 | if (IS_ERR(newmap)) { | |
4009 | err = PTR_ERR(newmap); | |
4010 | goto bad_unlock; | |
4011 | } | |
4012 | ||
4013 | /* swap into place */ | |
4014 | if (mdsc->mdsmap) { | |
4015 | oldmap = mdsc->mdsmap; | |
4016 | mdsc->mdsmap = newmap; | |
4017 | check_new_map(mdsc, newmap, oldmap); | |
4018 | ceph_mdsmap_destroy(oldmap); | |
4019 | } else { | |
4020 | mdsc->mdsmap = newmap; /* first mds map */ | |
4021 | } | |
3d14c5d2 | 4022 | mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; |
2f2dc053 SW |
4023 | |
4024 | __wake_requests(mdsc, &mdsc->waiting_for_map); | |
82dcabad ID |
4025 | ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP, |
4026 | mdsc->mdsmap->m_epoch); | |
2f2dc053 SW |
4027 | |
4028 | mutex_unlock(&mdsc->mutex); | |
4029 | schedule_delayed(mdsc); | |
4030 | return; | |
4031 | ||
4032 | bad_unlock: | |
4033 | mutex_unlock(&mdsc->mutex); | |
4034 | bad: | |
4035 | pr_err("error decoding mdsmap %d\n", err); | |
4036 | return; | |
4037 | } | |
4038 | ||
4039 | static struct ceph_connection *con_get(struct ceph_connection *con) | |
4040 | { | |
4041 | struct ceph_mds_session *s = con->private; | |
4042 | ||
4043 | if (get_session(s)) { | |
3997c01d | 4044 | dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref)); |
2f2dc053 SW |
4045 | return con; |
4046 | } | |
4047 | dout("mdsc con_get %p FAIL\n", s); | |
4048 | return NULL; | |
4049 | } | |
4050 | ||
4051 | static void con_put(struct ceph_connection *con) | |
4052 | { | |
4053 | struct ceph_mds_session *s = con->private; | |
4054 | ||
3997c01d | 4055 | dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1); |
2f2dc053 SW |
4056 | ceph_put_mds_session(s); |
4057 | } | |
4058 | ||
4059 | /* | |
4060 | * if the client is unresponsive for long enough, the mds will kill | |
4061 | * the session entirely. | |
4062 | */ | |
4063 | static void peer_reset(struct ceph_connection *con) | |
4064 | { | |
4065 | struct ceph_mds_session *s = con->private; | |
7e70f0ed | 4066 | struct ceph_mds_client *mdsc = s->s_mdsc; |
2f2dc053 | 4067 | |
f3ae1b97 | 4068 | pr_warn("mds%d closed our session\n", s->s_mds); |
7e70f0ed | 4069 | send_mds_reconnect(mdsc, s); |
2f2dc053 SW |
4070 | } |
4071 | ||
4072 | static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |
4073 | { | |
4074 | struct ceph_mds_session *s = con->private; | |
4075 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
4076 | int type = le16_to_cpu(msg->hdr.type); | |
4077 | ||
2600d2dd SW |
4078 | mutex_lock(&mdsc->mutex); |
4079 | if (__verify_registered_session(mdsc, s) < 0) { | |
4080 | mutex_unlock(&mdsc->mutex); | |
4081 | goto out; | |
4082 | } | |
4083 | mutex_unlock(&mdsc->mutex); | |
4084 | ||
2f2dc053 SW |
4085 | switch (type) { |
4086 | case CEPH_MSG_MDS_MAP: | |
430afbad YZ |
4087 | ceph_mdsc_handle_mdsmap(mdsc, msg); |
4088 | break; | |
4089 | case CEPH_MSG_FS_MAP_USER: | |
4090 | ceph_mdsc_handle_fsmap(mdsc, msg); | |
2f2dc053 SW |
4091 | break; |
4092 | case CEPH_MSG_CLIENT_SESSION: | |
4093 | handle_session(s, msg); | |
4094 | break; | |
4095 | case CEPH_MSG_CLIENT_REPLY: | |
4096 | handle_reply(s, msg); | |
4097 | break; | |
4098 | case CEPH_MSG_CLIENT_REQUEST_FORWARD: | |
2600d2dd | 4099 | handle_forward(mdsc, s, msg); |
2f2dc053 SW |
4100 | break; |
4101 | case CEPH_MSG_CLIENT_CAPS: | |
4102 | ceph_handle_caps(s, msg); | |
4103 | break; | |
4104 | case CEPH_MSG_CLIENT_SNAP: | |
2600d2dd | 4105 | ceph_handle_snap(mdsc, s, msg); |
2f2dc053 SW |
4106 | break; |
4107 | case CEPH_MSG_CLIENT_LEASE: | |
2600d2dd | 4108 | handle_lease(mdsc, s, msg); |
2f2dc053 | 4109 | break; |
fb18a575 LH |
4110 | case CEPH_MSG_CLIENT_QUOTA: |
4111 | ceph_handle_quota(mdsc, s, msg); | |
4112 | break; | |
2f2dc053 SW |
4113 | |
4114 | default: | |
4115 | pr_err("received unknown message type %d %s\n", type, | |
4116 | ceph_msg_type_name(type)); | |
4117 | } | |
2600d2dd | 4118 | out: |
2f2dc053 SW |
4119 | ceph_msg_put(msg); |
4120 | } | |
4121 | ||
4e7a5dcd SW |
4122 | /* |
4123 | * authentication | |
4124 | */ | |
a3530df3 AE |
4125 | |
4126 | /* | |
4127 | * Note: returned pointer is the address of a structure that's | |
4128 | * managed separately. Caller must *not* attempt to free it. | |
4129 | */ | |
4130 | static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | |
8f43fb53 | 4131 | int *proto, int force_new) |
4e7a5dcd SW |
4132 | { |
4133 | struct ceph_mds_session *s = con->private; | |
4134 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 4135 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
74f1869f | 4136 | struct ceph_auth_handshake *auth = &s->s_auth; |
4e7a5dcd | 4137 | |
74f1869f | 4138 | if (force_new && auth->authorizer) { |
6c1ea260 | 4139 | ceph_auth_destroy_authorizer(auth->authorizer); |
74f1869f | 4140 | auth->authorizer = NULL; |
4e7a5dcd | 4141 | } |
27859f97 SW |
4142 | if (!auth->authorizer) { |
4143 | int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | |
4144 | auth); | |
0bed9b5c SW |
4145 | if (ret) |
4146 | return ERR_PTR(ret); | |
27859f97 SW |
4147 | } else { |
4148 | int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | |
4149 | auth); | |
a255651d | 4150 | if (ret) |
a3530df3 | 4151 | return ERR_PTR(ret); |
4e7a5dcd | 4152 | } |
4e7a5dcd | 4153 | *proto = ac->protocol; |
74f1869f | 4154 | |
a3530df3 | 4155 | return auth; |
4e7a5dcd SW |
4156 | } |
4157 | ||
4158 | ||
0dde5848 | 4159 | static int verify_authorizer_reply(struct ceph_connection *con) |
4e7a5dcd SW |
4160 | { |
4161 | struct ceph_mds_session *s = con->private; | |
4162 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 4163 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
4e7a5dcd | 4164 | |
0dde5848 | 4165 | return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer); |
4e7a5dcd SW |
4166 | } |
4167 | ||
9bd2e6f8 SW |
4168 | static int invalidate_authorizer(struct ceph_connection *con) |
4169 | { | |
4170 | struct ceph_mds_session *s = con->private; | |
4171 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 4172 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
9bd2e6f8 | 4173 | |
27859f97 | 4174 | ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); |
9bd2e6f8 | 4175 | |
3d14c5d2 | 4176 | return ceph_monc_validate_auth(&mdsc->fsc->client->monc); |
9bd2e6f8 SW |
4177 | } |
4178 | ||
53ded495 AE |
4179 | static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con, |
4180 | struct ceph_msg_header *hdr, int *skip) | |
4181 | { | |
4182 | struct ceph_msg *msg; | |
4183 | int type = (int) le16_to_cpu(hdr->type); | |
4184 | int front_len = (int) le32_to_cpu(hdr->front_len); | |
4185 | ||
4186 | if (con->in_msg) | |
4187 | return con->in_msg; | |
4188 | ||
4189 | *skip = 0; | |
4190 | msg = ceph_msg_new(type, front_len, GFP_NOFS, false); | |
4191 | if (!msg) { | |
4192 | pr_err("unable to allocate msg type %d len %d\n", | |
4193 | type, front_len); | |
4194 | return NULL; | |
4195 | } | |
53ded495 AE |
4196 | |
4197 | return msg; | |
4198 | } | |
4199 | ||
79dbd1ba | 4200 | static int mds_sign_message(struct ceph_msg *msg) |
33d07337 | 4201 | { |
79dbd1ba | 4202 | struct ceph_mds_session *s = msg->con->private; |
33d07337 | 4203 | struct ceph_auth_handshake *auth = &s->s_auth; |
79dbd1ba | 4204 | |
33d07337 YZ |
4205 | return ceph_auth_sign_message(auth, msg); |
4206 | } | |
4207 | ||
79dbd1ba | 4208 | static int mds_check_message_signature(struct ceph_msg *msg) |
33d07337 | 4209 | { |
79dbd1ba | 4210 | struct ceph_mds_session *s = msg->con->private; |
33d07337 | 4211 | struct ceph_auth_handshake *auth = &s->s_auth; |
79dbd1ba | 4212 | |
33d07337 YZ |
4213 | return ceph_auth_check_message_signature(auth, msg); |
4214 | } | |
4215 | ||
9e32789f | 4216 | static const struct ceph_connection_operations mds_con_ops = { |
2f2dc053 SW |
4217 | .get = con_get, |
4218 | .put = con_put, | |
4219 | .dispatch = dispatch, | |
4e7a5dcd SW |
4220 | .get_authorizer = get_authorizer, |
4221 | .verify_authorizer_reply = verify_authorizer_reply, | |
9bd2e6f8 | 4222 | .invalidate_authorizer = invalidate_authorizer, |
2f2dc053 | 4223 | .peer_reset = peer_reset, |
53ded495 | 4224 | .alloc_msg = mds_alloc_msg, |
79dbd1ba ID |
4225 | .sign_message = mds_sign_message, |
4226 | .check_message_signature = mds_check_message_signature, | |
2f2dc053 SW |
4227 | }; |
4228 | ||
2f2dc053 | 4229 | /* eof */ |